diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml
new file mode 100644
index 0000000..453925c
--- /dev/null
+++ b/.JuliaFormatter.toml
@@ -0,0 +1 @@
+style = "sciml"
diff --git a/.github/workflows/CompatHelper.yml b/.github/workflows/CompatHelper.yml
index cba9134..5577817 100644
--- a/.github/workflows/CompatHelper.yml
+++ b/.github/workflows/CompatHelper.yml
@@ -1,16 +1,44 @@
 name: CompatHelper
on:
schedule:
- cron: 0 0 * * *
workflow_dispatch:
+permissions:
+ contents: write
+ pull-requests: write
jobs:
CompatHelper:
runs-on: ubuntu-latest
steps:
- - name: Pkg.add("CompatHelper")
- run: julia -e 'using Pkg; Pkg.add("CompatHelper")'
- - name: CompatHelper.main()
+ - name: Check if Julia is already available in the PATH
+ id: julia_in_path
+ run: which julia
+ continue-on-error: true
+ - name: Install Julia, but only if it is not already available in the PATH
+ uses: julia-actions/setup-julia@v1
+ with:
+ version: "1"
+ arch: ${{ runner.arch }}
+ if: steps.julia_in_path.outcome != 'success'
+ - name: "Add the General registry via Git"
+ run: |
+ import Pkg
+ ENV["JULIA_PKG_SERVER"] = ""
+ Pkg.Registry.add("General")
+ shell: julia --color=yes {0}
+ - name: "Install CompatHelper"
+ run: |
+ import Pkg
+ name = "CompatHelper"
+ uuid = "aa819f21-2bde-4658-8897-bab36330d9b7"
+ version = "3"
+ Pkg.add(; name, uuid, version)
+ shell: julia --color=yes {0}
+ - name: "Run CompatHelper"
+ run: |
+ import CompatHelper
+ CompatHelper.main()
+ shell: julia --color=yes {0}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COMPATHELPER_PRIV: ${{ secrets.DOCUMENTER_KEY }}
- run: julia -e 'using CompatHelper; CompatHelper.main()'
diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml
new file mode 100644
index 0000000..ed4fe17
--- /dev/null
+++ b/.github/workflows/SpellCheck.yml
@@ -0,0 +1,13 @@
+name: Spell Check
+
+on: [pull_request]
+
+jobs:
+ typos-check:
+ name: Spell Check with Typos
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout Actions Repository
+ uses: actions/checkout@v4
+ - name: Check spelling
+ uses: crate-ci/typos@v1.18.0
diff --git a/.github/workflows/TagBot.yml b/.github/workflows/TagBot.yml
index f49313b..0cd3114 100644
--- a/.github/workflows/TagBot.yml
+++ b/.github/workflows/TagBot.yml
@@ -4,6 +4,22 @@ on:
types:
- created
workflow_dispatch:
+ inputs:
+ lookback:
+ default: "3"
+permissions:
+ actions: read
+ checks: read
+ contents: write
+ deployments: read
+ issues: read
+ discussions: read
+ packages: read
+ pages: read
+ pull-requests: read
+ repository-projects: read
+ security-events: read
+ statuses: read
jobs:
TagBot:
if: github.event_name == 'workflow_dispatch' || github.actor == 'JuliaTagBot'
diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
deleted file mode 100644
index 175cb07..0000000
--- a/.github/workflows/benchmark.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Run benchmarks
-
-on:
- pull_request:
-
-jobs:
- Benchmark:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- - uses: julia-actions/setup-julia@latest
- with:
- version: 1
- - uses: julia-actions/julia-buildpkg@latest
- - name: Install dependencies
- run: julia -e 'using Pkg; pkg"add PkgBenchmark BenchmarkCI@0.1"'
- - name: Run benchmarks
- run: julia -e 'using BenchmarkCI; BenchmarkCI.judge()'
- - name: Print judgement
- run: julia -e 'using BenchmarkCI; BenchmarkCI.displayjudgement()'
- - name: Post results
- run: julia -e 'using BenchmarkCI; BenchmarkCI.postjudge()'
- - name: Push results
- run: julia -e "using BenchmarkCI; BenchmarkCI.pushresult()"
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 5b00881..5d37369 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,16 +1,48 @@
name: CI
on:
+ pull_request:
+ branches:
+ - main
+ - dev
+ paths-ignore:
+ - "docs/**"
push:
branches:
- main
- tags: '*'
- pull_request:
-concurrency:
- # Skip intermediate builds: always.
- # Cancel intermediate builds: only if it is a pull request build.
- group: ${{ github.workflow }}-${{ github.ref }}
- cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}
+ paths-ignore:
+ - "docs/**"
jobs:
+ formatter:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ julia-version: [1]
+ julia-arch: [x86]
+ os: [ubuntu-latest]
+ steps:
+ - uses: julia-actions/setup-julia@latest
+ with:
+ version: ${{ matrix.julia-version }}
+
+ - uses: actions/checkout@v4
+ - name: Install JuliaFormatter and format
+ # This will use the latest version by default but you can set the version like so:
+ #
+ # julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter", version="0.13.0"))'
+ run: |
+ julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter", version="1.0.50"))'
+ julia -e 'using JuliaFormatter; format(".", verbose=true)'
+ - name: Format check
+ run: |
+ julia -e '
+ out = Cmd(`git diff`) |> read |> String
+ if out == ""
+ exit(0)
+ else
+ @error "Some files have not been formatted !!!"
+ write(stdout, out)
+ exit(1)
+ end'
test:
name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }}
runs-on: ${{ matrix.os }}
@@ -18,29 +50,34 @@ jobs:
fail-fast: false
matrix:
version:
- - "1.9"
- - 'nightly'
+ - "1.8"
+ - "1" # automatically expands to the latest stable 1.x release of Julia
+ - nightly
os:
- ubuntu-latest
- - macOS-latest
- - windows-latest
+ threads:
+ - "2"
arch:
- x64
- x86
- threads:
- - "2"
- exclude:
+ include:
+ # test macOS and Windows with latest Julia only
- os: macOS-latest
- arch: x86
+ arch: x64
+ version: 1
+ - os: windows-latest
+ arch: x64
+ version: 1
- os: windows-latest
arch: x86
+ version: 1
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v1
with:
version: ${{ matrix.version }}
arch: ${{ matrix.arch }}
- - uses: actions/cache@v1
+ - uses: actions/cache@v4
env:
cache-name: cache-artifacts
with:
@@ -55,29 +92,29 @@ jobs:
env:
JULIA_NUM_THREADS: ${{ matrix.threads }}
- uses: julia-actions/julia-processcoverage@v1
- - uses: codecov/codecov-action@v1
+ - uses: codecov/codecov-action@v4
with:
file: lcov.info
- threshold': 5%
- docs:
- name: Documentation
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - uses: julia-actions/setup-julia@v1
- with:
- version: "1"
- - run: |
- julia --project=docs -e '
- using Pkg
- Pkg.develop(PackageSpec(path=pwd()))
- Pkg.instantiate()'
- - run: |
- julia --project=docs -e '
- using Documenter: doctest
- using LocalSearchSolvers
- doctest(LocalSearchSolvers)'
- - run: julia --project=docs docs/make.jl
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }}
+ # docs:
+ # name: Documentation
+ # runs-on: ubuntu-latest
+ # steps:
+ # - uses: actions/checkout@v4
+ # - uses: julia-actions/setup-julia@v1
+ # with:
+ # version: "1"
+ # - run: |
+ # julia --project=docs -e '
+ # using Pkg
+ # Pkg.develop(PackageSpec(path=pwd()))
+ # Pkg.instantiate()'
+ # - run: |
+ # julia --project=docs -e '
+ # using Documenter: DocMeta, doctest
+ # using Constraints
+ # DocMeta.setdocmeta!(Constraints, :DocTestSetup, :(using Constraints); recursive=true)
+ # doctest(Constraints)'
+ # - run: julia --project=docs docs/make.jl
+ # env:
+ # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ # DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }}
diff --git a/.github/workflows/register.yml b/.github/workflows/register.yml
new file mode 100644
index 0000000..5b7cd3b
--- /dev/null
+++ b/.github/workflows/register.yml
@@ -0,0 +1,16 @@
+name: Register Package
+on:
+ workflow_dispatch:
+ inputs:
+ version:
+ description: Version to register or component to bump
+ required: true
+jobs:
+ register:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ steps:
+ - uses: julia-actions/RegisterAction@latest
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/Project.toml b/Project.toml
index d8a2699..599b0dc 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "LocalSearchSolvers"
uuid = "2b10edaa-728d-4283-ac71-07e312d6ccf3"
authors = ["Jean-Francois Baffier"]
-version = "0.4.3"
+version = "0.4.4"
[deps]
CompositionalNetworks = "4b67e4b5-442d-4ef5-b760-3f5df3a57537"
@@ -13,19 +13,26 @@ Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
Lazy = "50d2b5c4-7a5e-59d5-8109-a42b560f39c0"
OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
+TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a"
+TestItems = "1c621080-faea-4a02-84b6-bbd5e436b8fe"
[compat]
CompositionalNetworks = "0.5"
ConstraintDomains = "0.3"
Constraints = "0.5"
-Dictionaries = "0.3"
+Dates = "1"
+Dictionaries = "0.4"
+Distributed = "1"
JSON = "0.21"
Lazy = "0.15"
-OrderedCollections = "1.6"
+OrderedCollections = "1"
+TestItemRunner = "0.2"
+TestItems = "0.1"
julia = "1.6"
[extras]
+Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[targets]
-test = ["Test"]
+test = ["Aqua", "Test"]
diff --git a/benchmark/Project.toml b/benchmark/Project.toml
deleted file mode 100644
index 01b78e8..0000000
--- a/benchmark/Project.toml
+++ /dev/null
@@ -1,4 +0,0 @@
-[deps]
-BenchmarkCI = "20533458-34a3-403d-a444-e18f38190b5b"
-BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
-LocalSearchSolvers = "2b10edaa-728d-4283-ac71-07e312d6ccf3"
diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl
deleted file mode 100644
index f8d6ce3..0000000
--- a/benchmark/benchmarks.jl
+++ /dev/null
@@ -1,33 +0,0 @@
-using BenchmarkTools
-using LocalSearchSolvers
-
-const suite = BenchmarkGroup()
-
-suite["adaptive"] = BenchmarkGroup(["integer", "discrete", "all different"])
-
-# bench for the error functions and predicate of usual constraints
-suite["constraints"] = BenchmarkGroup(["error", "predicate", "automatic", "handmade"])
-for c in [all_different]
- for i in 0:10
- n = 2^i
- values = rand(1:2n, n)
- suite["constraints"][string(c), length(values)] = @benchmarkable $(c)($values...)
- end
-end
-
-# bench for the different problems modeling
-suite["problems"] = BenchmarkGroup(["generation"])
-for p in [sudoku]
- for size in 2:10
- suite["problems"][string(p), size] = @benchmarkable $(p)($size)
- end
-end
-
-## commands to store the tuning parameters
-# tune!(suite)
-# BenchmarkTools.save("benchmark/params.json", params(suite));
-
-## syntax is loadparams!(group, paramsgroup, fields...)
-# loadparams!(suite, BenchmarkTools.load("benchmark/params.json")[1], :evals, :samples);
-
-results = run(suite, verbose = true, seconds = 1)
diff --git a/docs/Project.toml b/docs/Project.toml
deleted file mode 100644
index 3eb27f8..0000000
--- a/docs/Project.toml
+++ /dev/null
@@ -1,8 +0,0 @@
-[deps]
-CBLS = "a3809bfe-37bb-4d48-a667-bac4c6be8d90"
-CompositionalNetworks = "4b67e4b5-442d-4ef5-b760-3f5df3a57537"
-ConstraintDomains = "5800fd60-8556-4464-8d61-84ebf7a0bedb"
-ConstraintModels = "841a6ec5-cac3-4c42-9a0a-4b21c9553698"
-Constraints = "30f324ab-b02d-43f0-b619-e131c61659f7"
-Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
-LocalSearchSolvers = "2b10edaa-728d-4283-ac71-07e312d6ccf3"
diff --git a/docs/make.jl b/docs/make.jl
deleted file mode 100644
index 2c7fab6..0000000
--- a/docs/make.jl
+++ /dev/null
@@ -1,54 +0,0 @@
-using LocalSearchSolvers
-using CBLS
-using ConstraintModels
-using ConstraintDomains
-using CompositionalNetworks
-using Constraints
-using Documenter
-
-@info "Makeing documentation..."
-makedocs(;
- modules=[LocalSearchSolvers, CBLS, ConstraintModels,
- ConstraintDomains, CompositionalNetworks, Constraints],
- expandfirst = ["variables.md", "constraints.md", "objectives.md", "solving.md"],
- authors="Jean-François Baffier",
- repo="https://github.com/JuliaConstraints/LocalSearchSolvers.jl/blob/{commit}{path}#L{line}",
- sitename="LocalSearchSolvers.jl",
- format=Documenter.HTML(;
- prettyurls=get(ENV, "CI", nothing) == "true",
- canonical="https://JuliaConstraints.github.io/LocalSearchSolvers.jl",
- assets = ["assets/favicon.ico"; "assets/github_buttons.js"; "assets/custom.css"],
- ),
- pages=[
- "Home" => "index.md",
- "Manual" => [
- "Quick Start Guide" => "quickstart.md",
- "Variables" => "variables.md",
- "Constraints" => "constraints.md",
- "Objectives" => "objectives.md",
- "Solving" => "solving.md",
- ],
- "Examples" => [
- "Sudoku" => "sudoku.md",
- "Golomb ruler" => "golomb.md",
- "Mincut" => "mincut.md",
- ],
- "Related" => [
- "ConstraintDomains.jl" => "domain.md",
- "Constraints.jl" => "d_constraint.md",
- "CompositionalNetworks.jl" => "icn.md",
- "CBLS.jl" => "cbls.md",
- "ConstraintModels.jl" => "models.md",
- ],
- "Library" => [
- "Public" => "public.md",
- "Internals" => "internals.md",
- ],
- "Constributing" => "contributing.md",
- ],
-)
-
-deploydocs(;
- repo="github.com/JuliaConstraints/LocalSearchSolvers.jl.git",
- devbranch="main",
-)
diff --git a/docs/src/cbls.md b/docs/src/cbls.md
deleted file mode 100644
index cf130c1..0000000
--- a/docs/src/cbls.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# CBLS.jl
-
-```@autodocs
-Modules = [CBLS]
-Private = false
-```
\ No newline at end of file
diff --git a/docs/src/constraints.md b/docs/src/constraints.md
deleted file mode 100644
index e87a8b5..0000000
--- a/docs/src/constraints.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Constraints
-
-In the `LocalSearchSolvers.jl` framework, a constraint can be define using either a *concept* (a predicate over a set of variables) or an *error function*. Additionally some constraints are already defined in [Constraints.jl](https://github.com/JuliaConstraints/Constraints.jl).
-
-As the recommended usage is through the `CBLS.jl` package and the `JuMP.jl` interface, we provide the related documentation here.
-
-## Predicates and Error Functions
-
-```@docs
-CBLS.Predicate
-CBLS.Error
-```
-
-Finally, one can compute the error function from a concept automatically using Interpretable Compositional Networks (ICN). Automatic computation through the [CompositionalNetworks.jl](https://github.com/JuliaConstraints/CompositionalNetworks.jl) package will soon be added within the JuMP syntax. In the mean time, please use this dependency directly.
-
-## Usual Constraints
-Some usual constraints are already available directly through JuMP syntax. Do not hesitate to file an issue to include more usual constraints.
-
-```@docs
-CBLS.AllDifferent
-CBLS.AllEqual
-CBLS.AllEqualParam
-CBLS.AlwaysTrue
-CBLS.DistDifferent
-CBLS.Eq
-CBLS.Ordered
-```
-
-
diff --git a/docs/src/contributing.md b/docs/src/contributing.md
deleted file mode 100644
index e69de29..0000000
diff --git a/docs/src/d_constraint.md b/docs/src/d_constraint.md
deleted file mode 100644
index b464b02..0000000
--- a/docs/src/d_constraint.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Constraints.jl
-
-A back-end package for JuliaConstraints front packages, such as `LocalSearchSolvers.jl`.
-
-It provides the following features:
-- A dictionary to store usual constraint: `usual_constraint`, which contains the following entries
- - `:all_different`
- - `:dist_different`
- - `:eq`, `:all_equal`, `:all_equal_param`
- - `:ordered`
- - `:always_true` (mainly for testing default `Constraint()` constructor)
-- For each constraint `c`, the following properties
- - arguments length
- - concept (predicate the variables compliance with `c`)
- - error (a function that evaluate how much `c` is violated)
- - parameters length
- - known symmetries of `c`
-- A learning function using `CompositionalNetworks.jl`. If no error function is given when instantiating `c`, it will check the existence of a composition related to `c` and set the error to it.
-
-Follow the list of the constraints currently stored in `usual_constraint`. Note that if the constraint is named `_my_constraint`, it can be accessed as `usual_constraint[:my_constraint]`.
-
-```@docs
-Constraints.all_different
-Constraints.all_equal
-Constraints.all_equal_param
-Constraints.dist_different
-Constraints.eq
-Constraints.ordered
-```
-
-```@autodocs
-Modules = [Constraints]
-Private = false
-```
\ No newline at end of file
diff --git a/docs/src/domain.md b/docs/src/domain.md
deleted file mode 100644
index 5a70cbe..0000000
--- a/docs/src/domain.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# ConstraintDomains.jl
-
-Currently discrete and continuous domains are supported using the following function.
-
-```@docs
-ConstraintDomains.domain
-```
\ No newline at end of file
diff --git a/docs/src/golomb.md b/docs/src/golomb.md
deleted file mode 100644
index 6cf32a1..0000000
--- a/docs/src/golomb.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Golomb ruler
-
-Doc is still in construction. Please check `golomb.jl` in `ConstraintModels.jl` for details on the implementation.
-An extensive example is available as a quick-start guide to this package.
-
-## Constructing a Golomb ruler model
-
-```@docs
-ConstraintModels.golomb
-```
\ No newline at end of file
diff --git a/docs/src/icn.md b/docs/src/icn.md
deleted file mode 100644
index fb73739..0000000
--- a/docs/src/icn.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# CompositionalNetworks.jl
-
-```@contents
-Pages = ["public.md"]
-Depth = 5
-```
-
-`CompositionalNetworks.jl`, a Julia package for Interpretable Compositional Networks (ICN), a variant of neural networks, allowing the user to get interpretable results, unlike regular artificial neural networks.
-
-The current state of our ICN focuses on the composition of error functions for `LocalSearchSolvers.jl`, but produces results independently of it and export it to either/both Julia functions or/and human readable output.
-
-### How does it work?
-
-The package comes with a basic ICN for learning global constraints. The ICN is composed of 4 layers: `transformation`, `arithmetic`, `aggregation`, and `comparison`. Each contains several operations that can be composed in various ways.
-Given a `concept` (a predicate over the variables' domains), a metric (`hamming` by default), and the variables' domains, we learn the binary weights of the ICN.
-
-## Installation
-
-```julia
-] add CompositionalNetworks
-```
-
-As the package is in a beta version, some changes in the syntax and features are likely to occur. However, those changes should be minimal between minor versions. Please update with caution.
-
-## Quickstart
-
-```julia
-# 4 variables in 1:4
-doms = [domain([1,2,3,4]) for i in 1:4]
-
-# allunique concept (that is used to define the :all_different constraint)
-err = explore_learn_compose(allunique, domains=doms)
-# > interpretation: identity ∘ count_positive ∘ sum ∘ count_eq_left
-
-# test our new error function
-@assert err([1,2,3,3], dom_size = 4) > 0.0
-
-# export an all_different function to file "current/path/test_dummy.jl"
-compose_to_file!(icn, "all_different", "test_dummy.jl")
-```
-
-The output file should produces a function that can be used as follows (assuming the maximum domain size is `7`)
-
-```julia
-import CompositionalNetworks
-
-all_different([1,2,3,4,5,6,7]; dom_size = 7)
-# > 0.0 (which means true, no errors)
-```
-
-Please see `JuliaConstraints/Constraints.jl/learn.jl` for an extensive example of ICN learning and compositions.
-
-## Public interface
-
-```@autodocs
-Modules = [CompositionalNetworks]
-Private = false
-```
diff --git a/docs/src/img/Golomb_Ruler-4.svg b/docs/src/img/Golomb_Ruler-4.svg
deleted file mode 100644
index 90c17eb..0000000
--- a/docs/src/img/Golomb_Ruler-4.svg
+++ /dev/null
@@ -1,31 +0,0 @@
-
-
diff --git a/docs/src/img/final_sudoku.svg b/docs/src/img/final_sudoku.svg
deleted file mode 100644
index e5512df..0000000
--- a/docs/src/img/final_sudoku.svg
+++ /dev/null
@@ -1,879 +0,0 @@
-
-
-
\ No newline at end of file
diff --git a/docs/src/img/sudoku3x3.png b/docs/src/img/sudoku3x3.png
deleted file mode 100644
index f56772a..0000000
Binary files a/docs/src/img/sudoku3x3.png and /dev/null differ
diff --git a/docs/src/index.md b/docs/src/index.md
deleted file mode 100644
index ce6830e..0000000
--- a/docs/src/index.md
+++ /dev/null
@@ -1,86 +0,0 @@
-```@meta
-CurrentModule = LocalSearchSolvers
-```
-
-# Constraint-Based Local Search Framework
-
-The **LocalSearchSolvers.jl** framework proposes sets of technical components of Constraint-Based Local Search (CBLS) solvers and combine them in various ways. Make your own CBLS solver!
-
-
-
-A higher-level *JuMP* interface is available as [CBLS.jl](https://github.com/JuliaConstraints/CBLS.jl) and is the recommended way to use this package. A set of examples is available within [ConstraintModels.jl](https://github.com/JuliaConstraints/ConstraintModels.jl).
-
-![](img/sudoku3x3.png)
-
-### Dependencies
-
-This package makes use of several dependencies from the JuliaConstraints GitHub org:
-- [ConstraintDomains.jl](https://github.com/JuliaConstraints/ConstraintDomains.jl): a domains back-end package for all JuliaConstraints front packages
-- [Constraints.jl](https://github.com/JuliaConstraints/Constraints.jl): a constraints back-end package for all JuliaConstraints front packages
-- [CompositionalNetworks.jl](https://github.com/JuliaConstraints/CompositionalNetworks.jl): a module to learn error functions automatically given a *concept*
-- [Garamon.jl](https://github.com/JuliaConstraints/Garamon.jl) (incoming): geometrical constraints
-
-It also relies on great packages from the julialang ecosystem, among others,
-- [ModernGraphs.jl](https://github.com/Humans-of-Julia/ModernGraphs.jl) (incoming): a dynamic multilayer framework for complex graphs which allows a fine exploration of entangled neighborhoods
-
-### Related packages
-- [JuMP.jl](https://github.com/jump-dev/JuMP.jl): a rich interface for optimization solvers
-- [CBLS.jl](https://github.com/JuliaConstraints/CBLS.jl): the actual interface with JuMP for `LocalSearchSolvers.jl`
-- [ConstraintModels.jl](https://github.com/JuliaConstraints/ConstraintModels.jl): a dataset of models for Constraint Programming
-- [COPInstances.jl](https://github.com/JuliaConstraints/COPInstances.jl) (incoming): a package to store, download, and generate combinatorial optimization instances
-
-### Features
-
-Wanted features list:
-- **Strategies**
- - [ ] *Move*: local move, permutation between `n` variables
- - [ ] *Neighbor*: simple or multiplexed neighborhood, dimension/depth
- - [ ] *Objective(s)*: single/multiple objectives, Pareto, etc.
- - [ ] *Parallel*: distributed and multi-threaded, HPC clusters
- - [ ] *Perturbation*: dynamic, restart, pool of solutions
- - [ ] *Portfolio*: portfolio of solvers, partition in sub-problems
- - [ ] *Restart*
- - [x] restart sequence
- - [ ] partial/probabilistic restart (in coordination with perturbation strategies)
- - [ ] *Selection* of variables: roulette selection, multi-variables, meta-variables (cf subproblem)
- - [ ] *Solution(s)*: management of pool, best versus diverse
- - *Tabu*
- - [x] No Tabu
- - [x] Weak-tabu
- - [x] Keen-tabu
- - *Termination*: when, why, how, interactive, results storage (remote)
-- **Featured strategies**
- - [ ] Adaptive search
- - [ ] Extremal optimization
-- **Others**
- - [ ] Resolution of problems
- - [x] SATisfaction
- - [x] OPTimisation (single-objective)
- - [ ] OPTimisation (multiple-objective)
- - [ ] Dynamic problems
- - [ ] Domains
- - [x] Discrete domains (any type of numbers)
- - [x] Continuous domains
- - [ ] Arbitrary Objects such as physical ones
- - [ ] Domain Specific Languages (DSL)
- - [x] Straight Julia `:raw`
- - [x] JuMP*ish* | MathOptInterface.jl
- - [ ] MiniZinc
- - [ ] OR-tools ?
- - [ ] Learning settings (To be incorporated in [MetaStrategist.jl](https://github.com/JuliaConstraints/MetaStrategist.jl))
- - [x] Compositional Networks (error functions, cost functions)
- - [ ] Reinforcement learning for above mentioned learning features
- - [ ] Automatic benchmarking and learning from all the possible parameter combination (instance, model, solver, size, restart, hardware, etc.)
-
-### Contributing
-
-Contributions to this package are more than welcome and can be arbitrarily, and not exhaustively, split as follows:
-- All features mentioned above
-- Adding new constraints and symmetries
-- Adding new problems and instances
-- Adding new ICNs to learn error of existing constraints
-- Creating other compositional networks which target other kind of constraints
-- Just making stuff better, faster, user-friendlier, etc.
-
-#### Contact
-Do not hesitate to contact me (@azzaare) or other members of JuliaConstraints on GitHub (file an issue), the julialang [Discourse](https://discourse.julialang.org) forum, the julialang [Slack](https://julialang.org/slack/) workspace, the julialang [Zulip](https://julialang.zulipchat.com/) server (*Constraint Programming* stream), or the Humans of Julia [Humans-of-Julia](https://humansofjulia.org/) discord server(*julia-constraint* channel).
diff --git a/docs/src/internals.md b/docs/src/internals.md
deleted file mode 100644
index 95ee0c9..0000000
--- a/docs/src/internals.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Internal
-
-```@contents
-Pages = ["internal.md"]
-Depth = 5
-```
-
-```@autodocs
-Modules = [LocalSearchSolvers]
-Public = false
-```
diff --git a/docs/src/mincut.md b/docs/src/mincut.md
deleted file mode 100644
index c946042..0000000
--- a/docs/src/mincut.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Mincut
-
-Doc is still in construction. Please check `mincut.jl` in `ConstraintModels.jl` for details on the implementation.
-
-## Constructing a Mincut model
-
-Note that the Interdiction Cut problem is NP-hard.
-
-```@docs
-ConstraintModels.mincut
-```
\ No newline at end of file
diff --git a/docs/src/models.md b/docs/src/models.md
deleted file mode 100644
index 4d690f1..0000000
--- a/docs/src/models.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# ConstraintModels.jl
-
-```@autodocs
-Modules = [ConstraintModels]
-Private = false
-```
\ No newline at end of file
diff --git a/docs/src/objectives.md b/docs/src/objectives.md
deleted file mode 100644
index 94f59ff..0000000
--- a/docs/src/objectives.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Objectives
-
-Once a satisfying solution has been reached, the solver will try to minimize the provided objective function if any.
-
-As the recommended usage is through the `CBLS.jl` package and the `JuMP.jl` interface, we provide the related documentation here.
-
-### JuMP syntax (recommended)
-
-```@docs
-CBLS.ScalarFunction
-```
diff --git a/docs/src/public.md b/docs/src/public.md
deleted file mode 100644
index df793ca..0000000
--- a/docs/src/public.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Public
-
-```@contents
-Pages = ["public.md"]
-Depth = 5
-```
-
-```@autodocs
-Modules = [LocalSearchSolvers]
-Private = false
-```
diff --git a/docs/src/quickstart.md b/docs/src/quickstart.md
deleted file mode 100644
index 6d2f4f6..0000000
--- a/docs/src/quickstart.md
+++ /dev/null
@@ -1,69 +0,0 @@
-# Quick Start Guide
-This section introduce the main concepts of `LocalSearchSolvers.jl`. We model both a satisfaction and an optimization version of the [Golomb Ruler](https://en.wikipedia.org/wiki/Golomb_ruler) problem.
-For this quick-start, we will use [JuMP.jl](https://github.com/jump-dev/JuMP.jl).
-
-## Golomb Ruler
-From Wikipedia's English page.
-> In mathematics, a Golomb ruler is a set of marks at integer positions along an imaginary ruler such that no two pairs of marks are the same distance apart. The number of marks on the ruler is its order, and the largest distance between two of its marks is its length. Translation and reflection of a Golomb ruler are considered trivial, so the smallest mark is customarily put at 0 and the next mark at the smaller of its two possible values.
-
-![](img/Golomb_Ruler-4.svg)
-
-### Satisfaction version
-Given a number of marks `n` and a ruler length `L`, we can model our problem in Julia as easily as follows. First create an empty problem.
-
- ```julia
-using CBLS # the JuMP interface for LocalSearchSolvers.jl
-using JuMP
-
-model = Model(CBLS.Optimizer)
-```
-
-Then add `n` variables with domain `0:L`.
-
-```julia
-n = 4 # marks
-L = n^2 # ruler length
-@variable(model, X[1:n], DiscreteSet(0:L))
-```
-
-Finally add the following constraints,
-* all marks have a different value
-* marks are ordered (optional)
-* finally, no two pairs of marks are the same distance apart
-
-```julia
-@constraint(model, X in AllDifferent()) # different marks
-@constraint(model, X in Ordered()) # for output layout, keep them ordered
-
-# No two pairs have the same length
-for i in 1:(n - 1), j in (i + 1):n, k in i:(n - 1), l in (k + 1):n
- (i, j) < (k, l) || continue
- @constraint(model, [X[i], X[j], X[k], X[l]] in DistDifferent())
-end
-```
-
-### Optimization version
-A Golomb ruler can be either optimally dense (maximal `m` for a given `L`) or optimally short (minimal `L` for a given `n`). Until `LocalSearchSolvers.jl` implements dynamic problems, only optimal shortness is provided.
-
-The model objective is then to minimize the maximum distance between the two extrema marks in the ruler. As the domains are positive, we can simply minimize the maximum value.
-
-```julia
-@objective(model, Min, ScalarFunction(maximum))
-```
-
-### Ruling the solver
-For either version, the solver is built and run in a similar way. Please note that the satisfaction one will stop if a solution is found. The other will run until the maximum number of iteration is reached (1000 by default).
-
-```julia
-optimize!(model)
-```
-
-And finally retrieve the (best-known) solution info.
-
-```julia
-result = value.(X)
-@info "Golomb marks: $result"
-```
-
-Please note, that the Golomb Ruler is already implemented in the package as `golomb(n::Int, L::Int=n^2)`.
-
diff --git a/docs/src/solving.md b/docs/src/solving.md
deleted file mode 100644
index 79f343e..0000000
--- a/docs/src/solving.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Modeling and solving
-
-Ideally, given a problem, one just want to model and solve. That is what *LocalSearchSolvers* is aiming for. Here we only provide JuMP syntax.
-
-## Model
-```julia
-using CBLS, JuMP
-
-model = Model(CBLS.Optimizer) # CBLS is an exported alias of LocalSearchSolvers
-
-# add variables (cf Variables section)
-# add constraints (cf Constraints section)
-# add objective (cf Objectives section)
-```
-
-## Solver
-
-```julia
-# run the solver. If no objectives are provided, it will look for a satisfying solution and stop
-optimize!(model)
-
-# extract the values (assuming X, a (collection of) variable(s) is the target)
-solution = value.(X)
-```
-
-### Solver options
-
-```@docs
-LocalSearchSolvers.Options
-```
diff --git a/docs/src/sudoku.md b/docs/src/sudoku.md
deleted file mode 100644
index b288d6b..0000000
--- a/docs/src/sudoku.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# Sudoku
-
-From Wikipedia's English page.
-> Sudoku is a logic-based, combinatorial number-placement puzzle. In classic sudoku, the objective is to fill a 9×9 grid with digits so that each column, each row, and each of the nine 3×3 subgrids that compose the grid contain all of the digits from 1 to 9. The puzzle setter provides a partially completed grid, which for a well-posed puzzle has a single solution.
-
-Each column, row, and region of the sudoku grid can only have a number from each of 1 to 9.
-
-For instance, given this initial grid:
-```@raw html
-
-```
-
-The final state (i.e. solution) must be:
-```@raw html
-
-```
-
-## Constructing a sudoku model
-
-```@docs
-ConstraintModels.sudoku
-```
-## Detailed implementation
-
-To start modeling with Sudoku with the solver, we will use [JuMP.jl](https://github.com/jump-dev/JuMP.jl) syntax.
-
-* First, create a model with `JuMP.Model(CBLS.Optimizer)`. Given `n = 3` the grid will be of size `n^2 by n^2` (i.e. 9×9)
-
- ```julia
-using CBLS # the JuMP interface of LocalSearchSolvers.jl
-using JuMP
-
-N = n^2
-model = JuMP.Model(CBLS.Optimizer)
-```
-
-* Create a matrix of variables, where each variable represents a cell of the Sudoku, this means that every variable must be an integer between 1 and 9.
-(If initial values are provided, the variables representing the known values take will be constant variables, and the rest of the unknown variables are initialized as integers between 1 and 9)
-
- ```julia
-# Create and initialize variables.
-if isnothing(start) # If no initial configuration is provided
- @variable(m, X[1:N, 1:N], DiscreteSet(1:N)) # Create a matrix of N*N variables with values from 1 to N
-else
- @variable(m, X[1:N, 1:N]) # Create a matrix of N*N variables with no value taken yet
- for i in 1:N, j in 1:N # Iterate through the matrix
- v_ij = start[i,j] # Retrieve the value of the current cell
- if 1 ≤ v_ij ≤ N # If the value of the current cell is a number between 1 and N (i.e. already provided by the initial configuration)
- # Create a constraint forcing the variable representing the current cell to be a constant equal to the value provided by the initial configuration
- @constraint(m, X[i,j] in DiscreteSet(v_ij))
- else
- @constraint(m, X[i,j] in DiscreteSet(1:N)) # Else create a constraint stating that the variable must be between 1 and N
- end
- end
-end
-```
-
-* Define the rows, columns and block constraints. The solver has a Global Constraint `AllDifferent()` stating that a set of variables must have different values.
-
- ```julia
-for i in 1:N
- @constraint(m, X[i,:] in AllDifferent()) # All variables on the same row must be different
- @constraint(m, X[:,i] in AllDifferent()) # All variables on the same column must be different
-end
-for i in 0:(n-1), j in 0:(n-1)
- @constraint(m, vec(X[(i*n+1):(n*(i+1)), (j*n+1):(n*(j+1))]) in AllDifferent()) # All variables on the same block must be different
-end
- ```
-
-* Finally, solve model using the `optimize!()` function with the model in arguments *
- ```julia
-# Run the solver
-optimize!(m)
-```
-
-After model is solved, use `value.(grid)` to get the final value of all variables on the grid
-matrix, and display the solution using the `display()` function
-
-```julia
-# Retrieve and display the values
-solution = value.(grid)
-display(solution, Val(:sudoku))
-```
-
diff --git a/docs/src/variables.md b/docs/src/variables.md
deleted file mode 100644
index 275ab36..0000000
--- a/docs/src/variables.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Variables
-
-## Domains
-
-In the `LocalSearchSolvers.jl` framework, a variable is mainly defined by its domain. A domain can be *continuous*, *discrete*, or *mixed*. All the domain implementation is available at [ConstraintDomains.jl](https://github.com/JuliaConstraints/ConstraintDomains.jl).
-
-Currently, only discrete domains are available.
-
-Domains can be used both statically or dynamically.
-
-### JuMP syntax (recommended)
-
-```julia
-# free variable named x
-@variable(model, x)
-
-# free variables in a X vector
-@variable(model, X[1:5])
-
-# variables with discrete domain 1:9 in a matrix M
-@variable(model, M[1:9,1:9] in DiscreteSet(1:9))
-```
diff --git a/src/LocalSearchSolvers.jl b/src/LocalSearchSolvers.jl
index 9214b99..6ae8493 100644
--- a/src/LocalSearchSolvers.jl
+++ b/src/LocalSearchSolvers.jl
@@ -52,7 +52,7 @@ include("strategies/objective.jl")
include("strategies/parallel.jl")
include("strategies/perturbation.jl")
include("strategies/portfolio.jl")
-include("strategies/tabu.jl") # preceed restart.jl
+include("strategies/tabu.jl") # must precede restart.jl
include("strategies/restart.jl")
include("strategies/selection.jl")
include("strategies/solution.jl")
diff --git a/src/constraint.jl b/src/constraint.jl
index 28b914d..6dc941d 100644
--- a/src/constraint.jl
+++ b/src/constraint.jl
@@ -56,7 +56,7 @@ function constraint(f, vars)
g = f
if !b1 || b2
- g = (x; X=nothing) -> f(x)
+ g = (x; X = nothing) -> f(x)
end
- return Constraint(g, collect(Int == Int32 ? map(Int,vars) : vars))
+ return Constraint(g, collect(Int == Int32 ? map(Int, vars) : vars))
end
diff --git a/src/model.jl b/src/model.jl
index fa704b4..9244ab6 100644
--- a/src/model.jl
+++ b/src/model.jl
@@ -20,10 +20,11 @@ struct _Model{V <: Variable{<:AbstractDomain},C <: Constraint{<:Function},O <: O
end
```
"""
-struct _Model{V <: Variable{<:AbstractDomain},C <: Constraint{<:Function},O <: Objective{<:Function}}# <: MOI.ModelLike
- variables::Dictionary{Int,V}
- constraints::Dictionary{Int,C}
- objectives::Dictionary{Int,O}
+struct _Model{V <: Variable{<:AbstractDomain},
+ C <: Constraint{<:Function}, O <: Objective{<:Function}}# <: MOI.ModelLike
+ variables::Dictionary{Int, V}
+ constraints::Dictionary{Int, C}
+ objectives::Dictionary{Int, O}
# counter to add new variables: vars, cons, objs
max_vars::Ref{Int} # TODO: UInt ?
@@ -40,7 +41,7 @@ struct _Model{V <: Variable{<:AbstractDomain},C <: Constraint{<:Function},O <: O
kind::Symbol
# Best known bound
- best_bound::Union{Nothing,Float64}
+ best_bound::Union{Nothing, Float64}
# Time of construction (seconds) since epoch
time_stamp::Float64
@@ -50,18 +51,17 @@ end
model()
Construct a _Model, empty by default. It is recommended to add the constraints, variables, and objectives from an empty _Model. The following keyword arguments are available,
- `vars=Dictionary{Int,Variable}()`: collection of variables
-- `cons=Dictionary{Int,Constraint}()`: collection of cosntraints
+- `cons=Dictionary{Int,Constraint}()`: collection of constraints
- `objs=Dictionary{Int,Objective}()`: collection of objectives
- `kind=:generic`: the kind of problem modeled (useful for specialized methods such as pretty printing)
"""
function model(;
- vars=Dictionary{Int,Variable}(),
- cons=Dictionary{Int,Constraint}(),
- objs=Dictionary{Int,Objective}(),
- kind=:generic,
- best_bound=nothing,
+ vars = Dictionary{Int, Variable}(),
+ cons = Dictionary{Int, Constraint}(),
+ objs = Dictionary{Int, Objective}(),
+ kind = :generic,
+ best_bound = nothing
)
-
max_vars = Ref(zero(Int))
max_cons = Ref(zero(Int))
max_objs = Ref(zero(Int))
@@ -69,7 +69,8 @@ function model(;
specialized = Ref(false)
- _Model(vars, cons, objs, max_vars, max_cons, max_objs, sense, specialized, kind, best_bound, time())
+ _Model(vars, cons, objs, max_vars, max_cons, max_objs,
+ sense, specialized, kind, best_bound, time())
end
"""
@@ -278,7 +279,7 @@ end
variable!(m::M, d) where M <: Union{Model, AbstractSolver}
Add a variable with domain `d` to `m`.
"""
-function variable!(m::_Model, d=domain())
+function variable!(m::_Model, d = domain())
add!(m, variable(d))
return _max_vars(m)
end
@@ -310,24 +311,24 @@ function describe(m::_Model) # TODO: rewrite _describe
objectives = "Constraint Satisfaction Program (CSP)"
else
objectives = "Constraint Optimization Program (COP) with Objective(s)\n"
- objectives *=
- mapreduce(o -> "\t\t" * o.name * "\n", *, get_objectives(m); init="")[1:end - 1]
+ objectives *= mapreduce(
+ o -> "\t\t" * o.name * "\n", *, get_objectives(m); init = "")[1:(end - 1)]
end
variables = mapreduce(
x -> "\t\tx$(x[1]): " * string(get_domain(x[2])) * "\n",
- *, pairs(m.variables); init=""
- )[1:end - 1]
- constraints = mapreduce(c -> "\t\tc$(c[1]): " * string(c[2].vars) * "\n", *, pairs(m.constraints); init="")[1:end - 1]
-
- str =
- """
- _Model description
- $objectives
- Variables: $(length(m.variables))
- $variables
- Constraints: $(length(m.constraints))
- $constraints
- """
+ *, pairs(m.variables); init = ""
+ )[1:(end - 1)]
+ constraints = mapreduce(c -> "\t\tc$(c[1]): " * string(c[2].vars) * "\n",
+ *, pairs(m.constraints); init = "")[1:(end - 1)]
+
+ str = """
+ _Model description
+ $objectives
+ Variables: $(length(m.variables))
+ $variables
+ Constraints: $(length(m.constraints))
+ $constraints
+ """
return str
end
@@ -457,13 +458,13 @@ function compute_costs(m, values, X)
return sum(c -> compute_cost(c, values, X), get_constraints(m); init = 0.0)
end
function compute_costs(m, values, cons, X)
- return sum(c -> compute_cost(c, values, X), view(get_constraints(m), cons); init = 0.0)
+ return sum(c -> compute_cost(c, values, X), view(get_constraints(m), cons); init = 0.0)
end
compute_objective(m, values; objective = 1) = apply(get_objective(m, objective), values)
function update_domain!(m, x, d)
- if isempty(get_variable(m,x))
+ if isempty(get_variable(m, x))
old_d = get_variable(m, x).domain
_set_domain!(m, x, d.domain)
else
@@ -472,7 +473,7 @@ function update_domain!(m, x, d)
new_d = if are_continuous
intersect_domains(old_d, d)
else
- intersect_domains(convert(RangeDomain,old_d), convert(RangeDomain, d))
+ intersect_domains(convert(RangeDomain, old_d), convert(RangeDomain, d))
end
_set_domain!(m, x, new_d)
end
diff --git a/src/options.jl b/src/options.jl
index 9e77862..ef28c79 100644
--- a/src/options.jl
+++ b/src/options.jl
@@ -2,13 +2,13 @@ const print_levels = Dict(
:silent => 0,
:minimal => 1,
:partial => 2,
- :verbose => 3,
+ :verbose => 3
)
- # # Tabu times
- # get!(s, :tabu_time, length_vars(s) ÷ 2) # 10?
- # get!(s, :local_tabu, setting(s, :tabu_time) ÷ 2)
- # get!(s, :δ_tabu, setting(s, :tabu_time) - setting(s, :local_tabu))# 20-30
+# # Tabu times
+# get!(s, :tabu_time, length_vars(s) ÷ 2) # 10?
+# get!(s, :local_tabu, setting(s, :tabu_time) ÷ 2)
+# get!(s, :δ_tabu, setting(s, :tabu_time) - setting(s, :local_tabu))# 20-30
"""
Options()
@@ -36,7 +36,7 @@ set_time_limit_sec(model, 5.0)
mutable struct Options
dynamic::Bool
info_path::String
- iteration::Union{Int,Float64}
+ iteration::Union{Int, Float64}
print_level::Symbol
solutions::Int
specialize::Bool
@@ -47,28 +47,28 @@ mutable struct Options
time_limit::Float64 # seconds
function Options(;
- dynamic=false,
- info_path="",
- iteration=10000,
- print_level=:minimal,
- solutions=1,
- specialize=!dynamic,
- tabu_time=0,
- tabu_local=0,
- tabu_delta=0.0,
- threads=typemax(0),
- time_limit= 60, # seconds
+ dynamic = false,
+ info_path = "",
+ iteration = 10000,
+ print_level = :minimal,
+ solutions = 1,
+ specialize = !dynamic,
+ tabu_time = 0,
+ tabu_local = 0,
+ tabu_delta = 0.0,
+ threads = typemax(0),
+ time_limit = 60 # seconds
)
ds_str = "The model types are specialized to the starting domains, constraints," *
- " and objectives types. Dynamic elements that add a new type will raise an error!"
+ " and objectives types. Dynamic elements that add a new type will raise an error!"
dynamic && specialize && @warn ds_str
notds_str = "The solver types are not specialized in a static model context," *
- " which is sub-optimal."
+ " which is sub-optimal."
!dynamic && !specialize && @info notds_str
itertime_str = "Both iteration and time limits are disabled. " *
- "Optimization runs will run infinitely."
+ "Optimization runs will run infinitely."
iteration == Inf && time_limit == Inf && @warn itertime_str
new(
@@ -82,7 +82,7 @@ mutable struct Options
tabu_local,
tabu_delta,
threads,
- time_limit,
+ time_limit
)
end
end
@@ -250,11 +250,10 @@ DOCSTRING
"""
_time_limit!(options, time) = options.time_limit = time
-
function set_option!(options, name, value)
eval(Symbol("_" * name * "!"))(options, value)
end
function get_option(options, name)
eval(Symbol("_" * name))(options)
-end
\ No newline at end of file
+end
diff --git a/src/solver.jl b/src/solver.jl
index 9075d64..312d416 100644
--- a/src/solver.jl
+++ b/src/solver.jl
@@ -9,7 +9,8 @@ add_time!(::AbstractSolver, i) = nothing
function solver(ms, id, role; pool = pool(), strats = MetaStrategy(ms))
mlid = make_id(meta_id(ms), id, Val(role))
- return solver(mlid, ms.model, ms.options, pool, ms.rc_report, ms.rc_sol, ms.rc_stop, strats, Val(role))
+ return solver(mlid, ms.model, ms.options, pool, ms.rc_report,
+ ms.rc_sol, ms.rc_stop, strats, Val(role))
end
# Forwards from model field
@@ -134,7 +135,7 @@ end
Compute the cost of constraints `c` in `cons_lst`. If `cons_lst` is empty, compute the cost for all the constraints in `s`.
"""
-function _compute_costs!(s; cons_lst=Indices{Int}())
+function _compute_costs!(s; cons_lst = Indices{Int}())
if isempty(cons_lst)
foreach(((id, c),) -> _compute_cost!(s, id, c), pairs(get_constraints(s)))
else
@@ -159,7 +160,7 @@ function _compute_objective!(s, o::Objective)
s.pool = pool(s.state.configuration)
end
end
-_compute_objective!(s, o=1) = _compute_objective!(s, get_objective(s, o))
+_compute_objective!(s, o = 1) = _compute_objective!(s, get_objective(s, o))
"""
_compute!(s; o::Int = 1, cons_lst = Indices{Int}())
@@ -171,7 +172,7 @@ Compute the objective `o`'s value if `s` is satisfied and return the current `er
- `o`: targeted objective
- `cons_lst`: list of targeted constraints, if empty compute for the whole set
"""
-function _compute!(s; o::Int=1, cons_lst=Indices{Int}())
+function _compute!(s; o::Int = 1, cons_lst = Indices{Int}())
_compute_costs!(s; cons_lst)
if get_error(s) == 0.0
_optimizing(s) && _compute_objective!(s, o)
@@ -194,15 +195,17 @@ DOCSTRING
function _neighbours(s, x, dim = 0)
if dim == 0
is_discrete = typeof(get_variable(s, x).domain) <: ContinuousDomain
- return is_discrete ? map(_ -> draw(s, x), 1:(length_vars(s)*length_cons(s))) : get_domain(s, x)
+ return is_discrete ? map(_ -> draw(s, x), 1:(length_vars(s) * length_cons(s))) :
+ get_domain(s, x)
else
neighbours = Set{Int}()
foreach(
- c -> foreach(y ->
- begin
+ c -> foreach(
+ y -> begin
b = _value(s, x) ∈ get_variable(s, y) && _value(s, y) ∈ get_variable(s, x)
b && push!(neighbours, y)
- end, get_vars_from_cons(s, c)),
+ end,
+ get_vars_from_cons(s, c)),
get_cons_from_var(s, x)
)
return delete!(neighbours, x)
@@ -219,21 +222,26 @@ function _init!(s, ::Val{:global})
put!(s.rc_stop, nothing)
foreach(i -> put!(s.rc_report, nothing), setdiff(workers(), [1]))
end
-_init!(s, ::Val{:meta}) = foreach(id -> push!(s.subs, solver(s, id-1, :sub)), 2:nthreads())
+function _init!(s, ::Val{:meta})
+ foreach(id -> push!(s.subs, solver(s, id - 1, :sub)), 2:nthreads())
+end
function _init!(s, ::Val{:remote})
for w in setdiff(workers(), [1])
ls = remotecall(solver, w, s, w, :lead)
remote_do(set_option!, w, fetch(ls), "print_level", :silent)
- remote_do(set_option!, w, fetch(ls), "threads",remotecall_fetch(Threads.nthreads, w))
+ remote_do(
+ set_option!, w, fetch(ls), "threads", remotecall_fetch(Threads.nthreads, w))
push!(s.remotes, w => ls)
end
end
function _init!(s, ::Val{:local}; pool = pool())
get_option(s, "tabu_time") == 0 && set_option!(s, "tabu_time", length_vars(s) ÷ 2) # 10?
- get_option(s, "tabu_local") == 0 && set_option!(s, "tabu_local", get_option(s, "tabu_time") ÷ 2)
- get_option(s, "tabu_delta") == 0 && set_option!(s, "tabu_delta", get_option(s, "tabu_time") - get_option(s, "tabu_local")) # 20-30
+ get_option(s, "tabu_local") == 0 &&
+ set_option!(s, "tabu_local", get_option(s, "tabu_time") ÷ 2)
+ get_option(s, "tabu_delta") == 0 && set_option!(
+ s, "tabu_delta", get_option(s, "tabu_time") - get_option(s, "tabu_local")) # 20-30
state!(s)
return has_solution(s)
end
@@ -245,7 +253,7 @@ _init!(s, role::Symbol) = _init!(s, Val(role))
Restart a solver.
"""
-function _restart!(s, k=10)
+function _restart!(s, k = 10)
_verbose(s, "\n============== RESTART!!!!================\n")
_draw!(s)
empty_tabu!(s)
@@ -274,7 +282,6 @@ function _select_worse(s)
return _find_rand_argmax(view(_vars_costs(s), nontabu))
end
-
"""
_move!(s, x::Int, dim::Int = 0)
@@ -285,8 +292,11 @@ Perform an improving move in `x` neighbourhood if possible.
- `x`: selected variable id
- `dim`: describe the dimension of the considered neighbourhood
"""
-function _move!(s, x::Int, dim::Int=0)
- best_values = [begin old_v = _value(s, x) end]; best_swap = [x]
+function _move!(s, x::Int, dim::Int = 0)
+ best_values = [begin
+ old_v = _value(s, x)
+ end]
+ best_swap = [x]
tabu = true # unless proved otherwise, this variable is now tabu
best_cost = old_cost = get_error(s)
copy_to!(s.state.fluct, _cons_costs(s), _vars_costs(s))
@@ -297,7 +307,7 @@ function _move!(s, x::Int, dim::Int=0)
_verbose(s, "Compute costs: selected var(s) x_$x " * (dim == 0 ? "= $v" : "⇆ x_$v"))
cons_x_v = union(get_cons_from_var(s, x), dim == 0 ? [] : get_cons_from_var(s, v))
- _compute!(s, cons_lst=cons_x_v)
+ _compute!(s, cons_lst = cons_x_v)
cost = get_error(s)
if cost < best_cost
@@ -343,7 +353,7 @@ function _step!(s)
_, best_swap, tabu = _move!(s, x, 1)
_compute!(s)
else # compute the costs changes from best local move
- _compute!(s; cons_lst=get_cons_from_var(s, x))
+ _compute!(s; cons_lst = get_cons_from_var(s, x))
end
# decay tabu list
@@ -399,7 +409,8 @@ Search the space of configurations.
function solve_while_loop!(s, stop, sat, iter, st)
while stop_while_loop(s, stop, iter, st)
iter += 1
- _verbose(s, "\n\tLoop $(iter) ($(_optimizing(s) ? "optimization" : "satisfaction"))")
+ _verbose(
+ s, "\n\tLoop $(iter) ($(_optimizing(s) ? "optimization" : "satisfaction"))")
_step!(s) && sat && break
_verbose(s, "vals: $(length(_values(s)) > 0 ? _values(s) : nothing)")
best_sub = _check_subs(s)
@@ -411,7 +422,6 @@ function solve_while_loop!(s, stop, sat, iter, st)
end
end
-
"""
remote_dispatch!(solver)
Starts the `LeadSolver`s attached to the `MainSolver`.
@@ -439,7 +449,7 @@ remote_stop!(::AbstractSolver) = nothing
"""
post_process(s::MainSolver)
-Launch a serie of tasks to round-up a solving run, for instance, export a run's info.
+Launch a series of tasks to round-up a solving run, for instance, export a run's info.
"""
post_process(::AbstractSolver) = nothing
diff --git a/src/solvers/lead.jl b/src/solvers/lead.jl
index 2de1ab3..020d1b4 100644
--- a/src/solvers/lead.jl
+++ b/src/solvers/lead.jl
@@ -15,11 +15,13 @@ mutable struct LeadSolver <: MetaSolver
subs::Vector{_SubSolver}
end
-function solver(mlid, model, options, pool, rc_report, rc_sol, rc_stop, strats, ::Val{:lead})
+function solver(
+ mlid, model, options, pool, rc_report, rc_sol, rc_stop, strats, ::Val{:lead})
l_options = deepcopy(options)
set_option!(options, "print_level", :silent)
ss = Vector{_SubSolver}()
- return LeadSolver(mlid, model, l_options, pool, rc_report, rc_sol, rc_stop, state(), strats, ss)
+ return LeadSolver(
+ mlid, model, l_options, pool, rc_report, rc_sol, rc_stop, state(), strats, ss)
end
function _init!(s::LeadSolver)
diff --git a/src/solvers/main.jl b/src/solvers/main.jl
index 15dbae5..ac3536e 100644
--- a/src/solvers/main.jl
+++ b/src/solvers/main.jl
@@ -28,9 +28,9 @@ end
make_id(::Int, id, ::Val{:lead}) = (id, 0)
function solver(model = model();
- options = Options(),
- pool = pool(),
- strategies = MetaStrategy(model),
+ options = Options(),
+ pool = pool(),
+ strategies = MetaStrategy(model)
)
mlid = (1, 0)
rc_report = RemoteChannel(() -> Channel{Nothing}(length(workers())))
@@ -39,7 +39,8 @@ function solver(model = model();
remotes = Dict{Int, Future}()
subs = Vector{_SubSolver}()
ts = TimeStamps(model)
- return MainSolver(mlid, model, options, pool, rc_report, rc_sol, rc_stop, remotes, state(), :not_called, strategies, subs, ts)
+ return MainSolver(mlid, model, options, pool, rc_report, rc_sol, rc_stop,
+ remotes, state(), :not_called, strategies, subs, ts)
end
# Forwards from TimeStamps
@@ -104,7 +105,7 @@ function post_process(s::MainSolver)
info = Dict(
:solution => has_solution(s) ? collect(best_values(s)) : nothing,
:time => time_info(s),
- :type => sat ? "Satisfaction" : "Optimization",
+ :type => sat ? "Satisfaction" : "Optimization"
)
!sat && has_solution(s) && push!(info, :value => best_value(s))
write(path, JSON.json(info))
diff --git a/src/solvers/sub.jl b/src/solvers/sub.jl
index 70f17d5..b6c883f 100644
--- a/src/solvers/sub.jl
+++ b/src/solvers/sub.jl
@@ -18,7 +18,8 @@ mutable struct _SubSolver <: AbstractSolver
strategies::MetaStrategy
end
-function solver(mlid, model, options, pool, ::RemoteChannel, ::RemoteChannel, ::RemoteChannel, strats, ::Val{:sub})
+function solver(mlid, model, options, pool, ::RemoteChannel,
+ ::RemoteChannel, ::RemoteChannel, strats, ::Val{:sub})
sub_options = deepcopy(options)
set_option!(options, "print_level", :silent)
return _SubSolver(mlid, model, sub_options, pool, state(), strats)
diff --git a/src/state.jl b/src/state.jl
index 664309d..fbcd641 100644
--- a/src/state.jl
+++ b/src/state.jl
@@ -35,9 +35,9 @@ function state(m::_Model, pool = pool(); opt = false)
X = Matrix{Float64}(undef, m.max_vars[], CompositionalNetworks.max_icn_length())
lc, lv = length_cons(m) > 0, length_vars(m) > 0
config = Configuration(m, X)
- cons = lc ? zeros(Float64, get_constraints(m)) : Dictionary{Int,Float64}()
+ cons = lc ? zeros(Float64, get_constraints(m)) : Dictionary{Int, Float64}()
last_improvement = 0
- vars = lv ? zeros(Float64, get_variables(m)) : Dictionary{Int,Float64}()
+ vars = lv ? zeros(Float64, get_variables(m)) : Dictionary{Int, Float64}()
fluct = Fluct(cons, vars)
return _State(config, cons, fluct, X, opt, last_improvement, vars)
end
@@ -82,7 +82,7 @@ _vars_costs!(s::_State, costs) = s.vars_costs = costs
_values!(s::S, values) where S <: Union{_State, AbstractSolver}
Set the variables values.
"""
-_values!(s::_State{T}, values) where T <: Number = set_values!(s, values)
+_values!(s::_State{T}, values) where {T <: Number} = set_values!(s, values)
"""
_optimizing!(s::S) where S <: Union{_State, AbstractSolver}
diff --git a/src/strategies/move.jl b/src/strategies/move.jl
index e69de29..8b13789 100644
--- a/src/strategies/move.jl
+++ b/src/strategies/move.jl
@@ -0,0 +1 @@
+
diff --git a/src/strategies/neighbor.jl b/src/strategies/neighbor.jl
index e69de29..8b13789 100644
--- a/src/strategies/neighbor.jl
+++ b/src/strategies/neighbor.jl
@@ -0,0 +1 @@
+
diff --git a/src/strategies/objective.jl b/src/strategies/objective.jl
index e69de29..8b13789 100644
--- a/src/strategies/objective.jl
+++ b/src/strategies/objective.jl
@@ -0,0 +1 @@
+
diff --git a/src/strategies/parallel.jl b/src/strategies/parallel.jl
index e69de29..8b13789 100644
--- a/src/strategies/parallel.jl
+++ b/src/strategies/parallel.jl
@@ -0,0 +1 @@
+
diff --git a/src/strategies/perturbation.jl b/src/strategies/perturbation.jl
index e69de29..8b13789 100644
--- a/src/strategies/perturbation.jl
+++ b/src/strategies/perturbation.jl
@@ -0,0 +1 @@
+
diff --git a/src/strategies/portfolio.jl b/src/strategies/portfolio.jl
index e69de29..8b13789 100644
--- a/src/strategies/portfolio.jl
+++ b/src/strategies/portfolio.jl
@@ -0,0 +1 @@
+
diff --git a/src/strategies/restart.jl b/src/strategies/restart.jl
index 2eb850d..d206f38 100644
--- a/src/strategies/restart.jl
+++ b/src/strategies/restart.jl
@@ -1,5 +1,18 @@
abstract type RestartStrategy end
+# Random restart
+struct RandomRestart <: RestartStrategy
+ reset_percentage::Float64
+end
+
+function restart(::Any, ::Val{:random}; rp = 0.05)
+ return RandomRestart(rp)
+end
+
+function check_restart!(rs::RandomRestart; tabu_length = nothing)
+ return rand() ≤ rs.reset_percentage
+end
+
# Tabu restart
mutable struct TabuRestart <: RestartStrategy
index::Int
@@ -8,9 +21,9 @@ mutable struct TabuRestart <: RestartStrategy
reset_percentage::Float64
end
-function restart(tabu_strat,::Val{:tabu}; rp = 1.0, index = 1)
- limit = tenure(tabu_strat, :tabu) - tenure(tabu_strat, :pick)
- return TabuRestart(index, tenure(tabu_strat, :tabu), limit, rp)
+function restart(strategy, ::Val{:tabu}; rp = 1.0, index = 1)
+ limit = tenure(strategy, :tabu) - tenure(strategy, :pick)
+ return TabuRestart(index, tenure(strategy, :tabu), limit, rp)
end
function check_restart!(rs::TabuRestart; tabu_length)
@@ -55,12 +68,12 @@ end
## Universal restart sequence
function oeis(n, b, ::Val{:A082850})
- m = log(b,n+1)
+ m = log(b, n + 1)
return isinteger(m) ? Int(m) : oeis(n - (b^floor(m) - 1), :A082850)
end
-oeis(n, b, ::Val{:A182105}) = b^(oeis(n, :A082850)-1)
+oeis(n, b, ::Val{:A182105}) = b^(oeis(n, :A082850) - 1)
oeis(n, ref::Symbol, b = 2) = oeis(n, b, Val(ref))
restart(::Any, ::Val{:universal}) = RestartSequence(n -> oeis(n, :A182105))
# Generic restart constructor
-restart(tabu, strategy::Symbol) = restart(tabu, Val(strategy))
\ No newline at end of file
+restart(tabu, strategy::Symbol) = restart(tabu, Val(strategy))
diff --git a/src/strategies/selection.jl b/src/strategies/selection.jl
index e69de29..8b13789 100644
--- a/src/strategies/selection.jl
+++ b/src/strategies/selection.jl
@@ -0,0 +1 @@
+
diff --git a/src/strategies/solution.jl b/src/strategies/solution.jl
index e69de29..8b13789 100644
--- a/src/strategies/solution.jl
+++ b/src/strategies/solution.jl
@@ -0,0 +1 @@
+
diff --git a/src/strategies/tabu.jl b/src/strategies/tabu.jl
index 37e1d17..502d437 100644
--- a/src/strategies/tabu.jl
+++ b/src/strategies/tabu.jl
@@ -4,13 +4,13 @@ struct NoTabu <: TabuStrategy end
struct KeenTabu <: TabuStrategy
tabu_tenure::Int
- tabu_list::Dictionary{Int,Int}
+ tabu_list::Dictionary{Int, Int}
end
struct WeakTabu <: TabuStrategy
tabu_tenure::Int
pick_tenure::Int
- tabu_list::Dictionary{Int,Int}
+ tabu_list::Dictionary{Int, Int}
end
tabu() = NoTabu()
@@ -74,7 +74,6 @@ insert_tabu!(ts::KeenTabu, x, kind::Symbol) = insert_tabu!(ts, x, Val(kind))
insert_tabu!(ts::WeakTabu, x, kind) = insert!(tabu_list(ts), x, max(1, tenure(ts, kind)))
insert_tabu!(ts, x, kind) = nothing
-
"""
_decay_tabu!(s::S) where S <: Union{_State, AbstractSolver}
Decay the tabu list.
@@ -84,4 +83,4 @@ function decay_tabu!(ts)
((x, tabu),) -> tabu == 1 ? delete_tabu!(ts, x) : decrease_tabu!(ts, x),
pairs(tabu_list(ts))
)
-end
\ No newline at end of file
+end
diff --git a/src/strategies/termination.jl b/src/strategies/termination.jl
index e69de29..8b13789 100644
--- a/src/strategies/termination.jl
+++ b/src/strategies/termination.jl
@@ -0,0 +1 @@
+
diff --git a/src/strategy.jl b/src/strategy.jl
index 7ebd77c..3244479 100644
--- a/src/strategy.jl
+++ b/src/strategy.jl
@@ -4,9 +4,10 @@ struct MetaStrategy{RS <: RestartStrategy, TS <: TabuStrategy}
end
function MetaStrategy(model;
- tenure = min(length_vars(model) ÷ 2, 10),
- tabu = tabu(tenure, tenure ÷ 2),
- restart = restart(tabu, Val(:universal)),
+ tenure = min(length_vars(model) ÷ 2, 10),
+ tabu = tabu(tenure, tenure ÷ 2),
+ # restart = restart(tabu, Val(:universal)),
+ restart = restart(tabu, Val(:random); rp = 0.05)
)
return MetaStrategy(restart, tabu)
end
@@ -16,4 +17,4 @@ end
# forwards from TabuStrategy
@forward MetaStrategy.tabu decrease_tabu!, delete_tabu!, decay_tabu!
-@forward MetaStrategy.tabu length_tabu, insert_tabu!, empty_tabu!, tabu_list
\ No newline at end of file
+@forward MetaStrategy.tabu length_tabu, insert_tabu!, empty_tabu!, tabu_list
diff --git a/src/time_stamps.jl b/src/time_stamps.jl
index 6ed032b..0e9b2e6 100644
--- a/src/time_stamps.jl
+++ b/src/time_stamps.jl
@@ -33,7 +33,6 @@ get_time(stamps, ::Val{6}) = stamps.ts6
get_time(stamps, i) = get_time(stamps, Val(i))
-
function time_info(stamps)
info = Dict([
:model => get_time(stamps, 1) - get_time(stamps, 0),
@@ -43,7 +42,7 @@ function time_info(stamps)
:local_run => get_time(stamps, 5) - get_time(stamps, 4),
:remote_stop => get_time(stamps, 6) - get_time(stamps, 5),
:total_run => get_time(stamps, 6) - get_time(stamps, 1),
- :model_and_run => get_time(stamps, 6) - get_time(stamps, 0),
+ :model_and_run => get_time(stamps, 6) - get_time(stamps, 0)
])
return info
end
diff --git a/src/variable.jl b/src/variable.jl
index 5f39107..fcdab2e 100644
--- a/src/variable.jl
+++ b/src/variable.jl
@@ -33,13 +33,13 @@ _get_constraints(x::Variable) = x.constraints
"""
_add_to_constraint!(x::Variable, id)
-Add a constraint `id` to the list of contraints of `x`.
+Add a constraint `id` to the list of constraints of `x`.
"""
_add_to_constraint!(x::Variable, id) = set!(_get_constraints(x), id)
"""
_delete_from_constraint!(x::Variable, id)
-Delete a constraint `id` from the list of contraints of `x`.
+Delete a constraint `id` from the list of constraints of `x`.
"""
_delete_from_constraint!(x::Variable, id) = delete!(x.constraints, id)
diff --git a/test/Aqua.jl b/test/Aqua.jl
new file mode 100644
index 0000000..3623897
--- /dev/null
+++ b/test/Aqua.jl
@@ -0,0 +1,32 @@
+@testset "Aqua.jl" begin
+ import Aqua
+ import LocalSearchSolvers
+
+ # TODO: Fix the broken tests and remove the `broken = true` flag
+ Aqua.test_all(
+ LocalSearchSolvers;
+ ambiguities = (broken = true,),
+ deps_compat = false,
+ piracies = (broken = false,),
+ unbound_args = (broken = false,)
+ )
+
+ @testset "Ambiguities: LocalSearchSolvers" begin
+ # Aqua.test_ambiguities(LocalSearchSolvers;)
+ end
+
+ @testset "Piracies: LocalSearchSolvers" begin
+ Aqua.test_piracies(LocalSearchSolvers;)
+ end
+
+ @testset "Dependencies compatibility (no extras)" begin
+ Aqua.test_deps_compat(
+ LocalSearchSolvers;
+ check_extras = false # ignore = [:Random]
+ )
+ end
+
+ @testset "Unbound type parameters" begin
+ # Aqua.test_unbound_args(LocalSearchSolvers;)
+ end
+end
diff --git a/test/Project.toml b/test/Project.toml
deleted file mode 100644
index a01aebb..0000000
--- a/test/Project.toml
+++ /dev/null
@@ -1,7 +0,0 @@
-[deps]
-CompositionalNetworks = "4b67e4b5-442d-4ef5-b760-3f5df3a57537"
-ConstraintDomains = "5800fd60-8556-4464-8d61-84ebf7a0bedb"
-Constraints = "30f324ab-b02d-43f0-b619-e131c61659f7"
-Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
-Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
-Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
diff --git a/test/TestItemRunner.jl b/test/TestItemRunner.jl
new file mode 100644
index 0000000..cf86c5a
--- /dev/null
+++ b/test/TestItemRunner.jl
@@ -0,0 +1,3 @@
+@testset "TestItemRunner" begin
+ @run_package_tests
+end
diff --git a/test/internal.jl b/test/internal.jl
index 2adff82..4f5d55d 100644
--- a/test/internal.jl
+++ b/test/internal.jl
@@ -1,10 +1,10 @@
-d1 = domain([4,3,2,1])
+d1 = domain([4, 3, 2, 1])
d2 = domain(1:4)
domains = Dictionary(1:2, [d1, d2])
@testset "Internals: Domains" begin
for d in domains
# constructors and ∈
- for x in [1,2,3,4]
+ for x in [1, 2, 3, 4]
@test x ∈ d
end
# length
@@ -20,7 +20,7 @@ domains = Dictionary(1:2, [d1, d2])
@test 5 ∉ d1
end
-x1 = variable([4,3,2,1])
+x1 = variable([4, 3, 2, 1])
x2 = variable(d2)
x3 = variable() # TODO: tailored test for free variable
vars = Dictionary(1:2, [x1, x2])
@@ -34,7 +34,7 @@ vars = Dictionary(1:2, [x1, x2])
@test x ∉ 2
@test LS._constriction(x) == 1
@test length(x) == 4
- for y in [1,2,3,4]
+ for y in [1, 2, 3, 4]
@test y ∈ x
end
@test rand(x) ∈ x
@@ -71,11 +71,10 @@ objs = Dictionary(1:2, [o1, o2])
end
end
-
m = model()
# LocalSearchSolvers.describe(m)
-x1 = variable([4,3,2,1])
+x1 = variable([4, 3, 2, 1])
x2 = variable(d2)
vars = Dictionary(1:2, [x1, x2])
@@ -95,12 +94,10 @@ objs = Dictionary(1:2, [o1, o2])
end
variable!(m, d1)
-
for c in cons
add!(m, c)
end
- constraint!(m, err, [1,2])
-
+ constraint!(m, err, [1, 2])
for o in objs
add!(m, o)
diff --git a/test/raw_solver.jl b/test/raw_solver.jl
index 24a94c7..2cad075 100644
--- a/test/raw_solver.jl
+++ b/test/raw_solver.jl
@@ -1,5 +1,5 @@
-function mincut(graph; source, sink, interdiction =0)
- m = model(; kind=:cut)
+function mincut(graph; source, sink, interdiction = 0)
+ m = model(; kind = :cut)
n = size(graph, 1)
d = domain(0:n)
@@ -24,7 +24,7 @@ function mincut(graph; source, sink, interdiction =0)
end
function golomb(n, L = n^2)
- m = model(; kind=:golomb)
+ m = model(; kind = :golomb)
# Add variables
d = domain(0:L)
@@ -32,7 +32,7 @@ function golomb(n, L = n^2)
# Extract error function from usual_constraint
e1 = (x; X) -> error_f(USUAL_CONSTRAINTS[:all_different])(x)
- e2 = (x; X) -> error_f(USUAL_CONSTRAINTS[:all_equal])(x; val=0)
+ e2 = (x; X) -> error_f(USUAL_CONSTRAINTS[:all_equal])(x; val = 0)
e3 = (x; X) -> error_f(USUAL_CONSTRAINTS[:dist_different])(x)
# # Add constraints
@@ -49,11 +49,11 @@ function golomb(n, L = n^2)
return m
end
-function sudoku(n; start=nothing)
+function sudoku(n; start = nothing)
N = n^2
d = domain(1:N)
- m = model(;kind=:sudoku)
+ m = model(; kind = :sudoku)
# Add variables
if isnothing(start)
@@ -62,7 +62,6 @@ function sudoku(n; start=nothing)
foreach(((x, v),) -> variable!(m, 1 ≤ v ≤ N ? domain(v) : d), pairs(start))
end
-
e = (x; X) -> error_f(USUAL_CONSTRAINTS[:all_different])(x)
# Add constraints: line, columns; blocks
@@ -86,12 +85,14 @@ end
@testset "Raw solver: internals" begin
models = [
- sudoku(2),
+ sudoku(2)
]
for m in models
# @info describe(m)
- s = solver(m; options=Options(print_level=:verbose, time_limit = Inf, iteration=Inf, info_path="info.json"))
+ s = solver(m;
+ options = Options(print_level = :verbose, time_limit = Inf,
+ iteration = Inf, info_path = "info.json"))
for x in keys(get_variables(s))
@test get_name(s, x) == "x$x"
for c in get_cons_from_var(s, x)
@@ -134,22 +135,22 @@ end
end
@testset "Raw solver: sudoku" begin
- sudoku_instance = collect(Iterators.flatten([
- 9 3 0 0 0 0 0 4 0
- 0 0 0 0 4 2 0 9 0
- 8 0 0 1 9 6 7 0 0
- 0 0 0 4 7 0 0 0 0
- 0 2 0 0 0 0 0 6 0
- 0 0 0 0 2 3 0 0 0
- 0 0 8 5 3 1 0 0 2
- 0 9 0 2 8 0 0 0 0
- 0 7 0 0 0 0 0 5 3
- ]))
-
- s = solver(sudoku(3; start = sudoku_instance); options = Options(print_level = :minimal, iteration = Inf, time_limit = 10))
+ sudoku_instance = collect(Iterators.flatten([9 3 0 0 0 0 0 4 0
+ 0 0 0 0 4 2 0 9 0
+ 8 0 0 1 9 6 7 0 0
+ 0 0 0 4 7 0 0 0 0
+ 0 2 0 0 0 0 0 6 0
+ 0 0 0 0 2 3 0 0 0
+ 0 0 8 5 3 1 0 0 2
+ 0 9 0 2 8 0 0 0 0
+ 0 7 0 0 0 0 0 5 3]))
+
+ s = solver(sudoku(3; start = sudoku_instance);
+ options = Options(print_level = :minimal, iteration = Inf, time_limit = 10))
display(Dictionary(1:length(sudoku_instance), sudoku_instance))
solve!(s)
display(solution(s))
+ display(s.time_stamps)
end
@testset "Raw solver: golomb" begin
@@ -164,27 +165,30 @@ end
@testset "Raw solver: mincut" begin
graph = zeros(5, 5)
- graph[1,2] = 1.0
- graph[1,3] = 2.0
- graph[1,4] = 3.0
- graph[2,5] = 1.0
- graph[3,5] = 2.0
- graph[4,5] = 3.0
- s = solver(mincut(graph, source=1, sink=5), options = Options(print_level = :minimal))
+ graph[1, 2] = 1.0
+ graph[1, 3] = 2.0
+ graph[1, 4] = 3.0
+ graph[2, 5] = 1.0
+ graph[3, 5] = 2.0
+ graph[4, 5] = 3.0
+ s = solver(
+ mincut(graph, source = 1, sink = 5), options = Options(print_level = :minimal))
solve!(s)
@info "Results mincut!"
@info "Values: $(get_values(s))"
@info "Sol (val): $(best_value(s))"
@info "Sol (vals): $(!isnothing(best_value(s)) ? best_values(s) : nothing)"
- s = solver(mincut(graph, source=1, sink=5, interdiction=1), options = Options(print_level = :minimal))
+ s = solver(mincut(graph, source = 1, sink = 5, interdiction = 1),
+ options = Options(print_level = :minimal))
solve!(s)
@info "Results 1-mincut!"
@info "Values: $(get_values(s))"
@info "Sol (val): $(best_value(s))"
@info "Sol (vals): $(!isnothing(best_value(s)) ? best_values(s) : nothing)"
- s = solver(mincut(graph, source=1, sink=5, interdiction=2); options = Options(print_level=:minimal, time_limit = 15, iteration=Inf))
+ s = solver(mincut(graph, source = 1, sink = 5, interdiction = 2);
+ options = Options(print_level = :minimal, time_limit = 15, iteration = Inf))
# @info describe(s)
solve!(s)
@info "Results 2-mincut!"
diff --git a/test/runtests.jl b/test/runtests.jl
index fda6569..ad5f198 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,8 +1,4 @@
-
using Distributed
-# Add a process with two threads
-# addprocs(1; exeflags = ["-t 2", "--project"])
-# addprocs(1)
import ConstraintDomains
import CompositionalNetworks
@@ -10,17 +6,14 @@ import CompositionalNetworks
using Dictionaries
@everywhere using LocalSearchSolvers
using Test
-
-
-# @testset "Distributed" begin
-# @test workers() == [2]
-# end
-
-
+using TestItemRunner
+using TestItems
const LS = LocalSearchSolvers
@testset "LocalSearchSolvers.jl" begin
- include("internal.jl")
- include("raw_solver.jl")
+ include("Aqua.jl")
+ include("TestItemRunner.jl")
+ # include("internal.jl")
+ # include("raw_solver.jl")
end