diff --git a/Project.toml b/Project.toml
index 0ee6cccc..9cdb99a5 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "SequentialSamplingModels"
 uuid = "0e71a2a6-2b30-4447-8742-d083a85e82d1"
 authors = ["itsdfish"]
-version = "0.10.2"
+version = "0.11.0"
 
 [deps]
 Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
diff --git a/README.md b/README.md
index dca7ea6c..57ed4329 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,7 @@ The following SSMs are supported :
 - [Linear Ballistic Accumulator](https://itsdfish.github.io/SequentialSamplingModels.jl/dev/lba/)
 - [Log Normal Race](https://itsdfish.github.io/SequentialSamplingModels.jl/dev/lnr/)
 - [Multi-attribute Attentional Drift Diffusion](https://itsdfish.github.io/SequentialSamplingModels.jl/dev/maaDDM/)
+- [Multi-attribute Decision Field Theory](https://itsdfish.github.io/SequentialSamplingModels.jl/dev/mdft/)
 - [Poisson Race](https://itsdfish.github.io/SequentialSamplingModels.jl/dev/poisson_race)
 - [Racing Diffusion](https://itsdfish.github.io/SequentialSamplingModels.jl/dev/rdm/)
 - [Wald](https://itsdfish.github.io/SequentialSamplingModels.jl/dev/wald/)
diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl
index 405858f3..706b059e 100644
--- a/benchmark/benchmarks.jl
+++ b/benchmark/benchmarks.jl
@@ -61,3 +61,24 @@ for dist ∈ dists
     SUITE[:simulate][dist_name] = @benchmarkable(simulate($dist()), evals = 10, samples = 1000,)
 end
+
+parms = (
+    σ = 0.1,
+    α = 0.50,
+    τ = 0.0,
+    γ = 1.0,
+    ϕ1 = 0.01,
+    ϕ2 = 0.1,
+    β = 10,
+    κ = [5, 5]
+)
+
+mdft = MDFT(; n_alternatives = 3, parms...)
+
+M = [
+    1.0 3.0
+    3.0 1.0
+    0.9 3.1
+]
+
+SUITE[:simulate][:mdft] = @benchmarkable(simulate(mdft, M), evals = 10, samples = 1000,)
diff --git a/docs/make.jl b/docs/make.jl
index 366b4bf9..982c648c 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -30,7 +30,8 @@ makedocs(
             "Leaky Competing Accumulator (LCA)" => "lca.md",
             "Linear Ballistic Accumulator (LBA)" => "lba.md",
             "Lognormal Race Model (LNR)" => "lnr.md",
-            "Muti-attribute attentional drift diffusion Model" => "maaDDM.md",
+            "Multi-attribute Attentional Drift Diffusion Model" => "maaDDM.md",
+            "Multi-attribute Decision Field Theory" => "mdft.md",
             "Poisson Race" => "poisson_race.md",
             "Racing Diffusion Model (RDM)" => "rdm.md",
             "Starting-time Drift Diffusion Model (stDDM)" => "stDDM.md",
diff --git a/docs/src/DDM.md b/docs/src/DDM.md
index 2e29adb0..63dc5887 100644
--- a/docs/src/DDM.md
+++ b/docs/src/DDM.md
@@ -91,12 +91,10 @@ logpdf.(dist, choices, rts)
 ```
 
 ## Compute Choice Probability
-The choice probability $\Pr(C=c)$ is computed by passing the model and choice index to `cdf`.
-
+The choice probability $\Pr(C=c)$ is computed by passing the model and choice index to `cdf` along with a large value for time as the third argument.
 ```@example DDM
-cdf(dist, 1)
+cdf(dist, 1, 10)
 ```
-To compute the joint probability of choosing $c$ within $t$ seconds, i.e., $\Pr(T \leq t \wedge C=c)$, pass a third argument for $t$.
 
 ## Plot Simulation
 The code below overlays the PDF on reaction time histograms for each option.
diff --git a/docs/src/cddm.md b/docs/src/cddm.md
index 927ffbe6..9b5f41ab 100644
--- a/docs/src/cddm.md
+++ b/docs/src/cddm.md
@@ -11,7 +11,8 @@ model = CDDM(;
     η = [.50,.50],
     σ = 1.0,
     α = 4.0,
-    τ = .30)
+    τ = .30
+)
 
 Random.seed!(5874)
@@ -90,7 +91,7 @@ Non-decision time is an additive constant representing encoding and motor respon
 Now that values have been asigned to the parameters, we will pass them to `CDDM` to generate the model object.
 ```@example CDDM
-dist = CDDM(ν, η, σ, α, τ)
+dist = CDDM(; ν, η, σ, α, τ)
 ```
 
 ## Simulate Model
diff --git a/docs/src/index.md b/docs/src/index.md
index 104b29dc..ad4582df 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -19,13 +19,14 @@ using SequentialSamplingModels
 using SequentialSamplingModels: increment!
 Random.seed!(8437)
 
-parms = (α = 1.5,
-    β=0.20,
-    λ=0.10,
-    ν=[2.5,2.0],
-    Δt=.001,
-    τ=.30,
-    σ=1.0)
+parms = (
+    α = 1.5,
+    β=0.20,
+    λ=0.10,
+    ν=[2.5,2.0],
+    τ=.30,
+    σ=1.0
+)
 model = LCA(; parms...)
 time_steps,evidence = simulate(model)
 lca_plot = plot(time_steps, evidence, xlabel="Time (seconds)", ylabel="Evidence",
diff --git a/docs/src/lba.md b/docs/src/lba.md
index b442d187..2caf2265 100644
--- a/docs/src/lba.md
+++ b/docs/src/lba.md
@@ -86,12 +86,10 @@ logpdf.(dist, choices, rts)
 ```
 
 ## Compute Choice Probability
-The choice probability $\Pr(C=c)$ is computed by passing the model and choice index to `cdf`.
-
+The choice probability $\Pr(C=c)$ is computed by passing the model and choice index to `cdf` along with a large value for time as the third argument.
 ```@example lba
-cdf(dist, 1)
+cdf(dist, 1, Inf)
 ```
-To compute the joint probability of choosing $c$ within $t$ seconds, i.e., $\Pr(T \leq t \wedge C=c)$, pass a third argument for $t$.
 
 ## Plot Simulation
 The code below overlays the PDF on reaction time histograms for each option.
diff --git a/docs/src/lca.md b/docs/src/lca.md
index 51d82816..0efcae2c 100644
--- a/docs/src/lca.md
+++ b/docs/src/lca.md
@@ -51,12 +51,6 @@ Diffusion noise is the amount of within trial noise in the evidence accumulation
 ```@example lca
 σ = 1.0
 ```
-### Time Step
-The time step parameter $\Delta t$ is the precision of the discrete time approxmation.
-
-```@example lca
-Δt = .001
-```
 
 ### Non-Decision Time
 
@@ -69,7 +63,7 @@ Non-decision time is an additive constant representing encoding and motor respon
 Now that values have been asigned to the parameters, we will pass them to `LCA` to generate the model object.
 
 ```@example lca
-dist = LCA(; ν, α, β, λ, τ, σ, Δt)
+dist = LCA(; ν, α, β, λ, τ, σ)
 ```
 
 ## Simulate Model
@@ -78,13 +72,13 @@ Now that the model is defined, we will generate $10,000$ choices and reaction ti
 ```@example lca
 choices,rts = rand(dist, 10_000)
 ```
-## Compute Choice Probability
-The choice probability $\Pr(C=c)$ is computed by passing the model and choice index to `cdf`.
+In the code block above, `rand` has a keyword argument `Δt` which controls the precision of the discrete approximation. The default value is `Δt = .001`.
 
+## Compute Choice Probability
+The choice probability $\Pr(C=c)$ is computed by passing the model and choice index to `cdf` along with a large value for time as the third argument.
 ```@example lca
-cdf(dist, 1)
+cdf(dist, 1, Inf)
 ```
-To compute the joint probability of choosing $c$ within $t$ seconds, i.e., $\Pr(T \leq t \wedge C=c)$, pass a third argument for $t$.
 
 ## Plot Simulation
 The code below plots a histogram for each option.
diff --git a/docs/src/lnr.md b/docs/src/lnr.md
index 40136f9f..58fab9f1 100644
--- a/docs/src/lnr.md
+++ b/docs/src/lnr.md
@@ -74,7 +74,7 @@ logpdf.(dist, choices, rts)
 The choice probability $\Pr(C=c)$ is computed by passing the model and choice index to `cdf`.
 
 ```@example lnr
-cdf(dist, 1)
+cdf(dist, 1, Inf)
 ```
 To compute the joint probability of choosing $c$ within $t$ seconds, i.e., $\Pr(T \leq t \wedge C=c)$, pass a third argument for $t$.
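+For example, assuming the `dist` object defined above, the joint probability of choosing option 1 within one second (an arbitrary cutoff, used here only for illustration) could be computed as:
+
+```@example lnr
+cdf(dist, 1, 1.0)
+```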
diff --git a/docs/src/mdft.md b/docs/src/mdft.md
new file mode 100644
index 00000000..3a3fbc31
--- /dev/null
+++ b/docs/src/mdft.md
@@ -0,0 +1,204 @@
+```@setup MDFT
+using SequentialSamplingModels
+using Plots
+using Random
+M = [
+    1.0 3.0 # A
+    3.0 1.0 # B
+    0.9 3.1 # S
+]
+```
+
+# Multi-attribute Decision Field Theory
+
+Multi-attribute Decision Field Theory (MDFT; Roe, Busemeyer, & Townsend, 2001) models how people choose between alternatives with multiple dimensions, such as cars, phones, or jobs. As an example, jobs may differ in terms of benefits, salary, flexibility, and work-life balance. As with other sequential sampling models, MDFT assumes that evidence (or preference) accumulates dynamically until the evidence for one alternative reaches a threshold, triggering the selection of the winning alternative. MDFT incorporates three additional core assumptions:
+
+1. Attention switches between attributes, and alternatives are compared on the currently attended attribute
+2. As two alternatives become closer to each other in attribute space, their mutual inhibition increases
+3. Evidence for each alternative gradually decays across time
+
+One of MDFT's strong suits is accounting for context effects in preferential decision making. A context effect occurs when the preference relationship between two alternatives changes when a third alternative is included in the choice set. In such cases, the preferences may reverse or the decision maker may violate rational choice principles.
+
+Note that this version of MDFT uses stochastic differential equations (see Evans et al., 2019). For the random walk version, see `ClassicMDFT`.
+
+# Similarity Effect
+
+In what follows, we will illustrate the use of MDFT with a demonstration of the similarity effect.
+Consider the choice between two jobs, `A` and `B`. The main criteria for evaluating the two jobs are salary and flexibility. Job `A` is high on salary but low on flexibility, whereas job `B` is high on flexibility but low on salary. In the plot below, jobs `A` and `B` are located on the line of indifference, $y = 4 - x$. However, because salary receives more attention, job `A` is slightly preferred over job `B`.
+
+```@example MDFT
+scatter(
+    M[:, 1],
+    M[:, 2],
+    grid = false,
+    leg = false,
+    lims = (0, 4),
+    xlabel = "Flexibility",
+    ylabel = "Salary",
+    markersize = 6,
+    markerstrokewidth = 2
+)
+annotate!(M[1, 1] + 0.10, M[1, 2] + 0.25, "A")
+annotate!(M[2, 1] + 0.10, M[2, 2] + 0.25, "B")
+annotate!(M[3, 1] + 0.10, M[3, 2] + 0.25, "S")
+plot!(0:0.1:4, 4:-0.1:0, color = :black, linestyle = :dash)
+```
+Suppose a job `S`, which is similar to `A`, is added to the set of alternatives. Job `S` inhibits job `A` more than it inhibits job `B` because `S` and `A` are close in attribute space. As a result, the preference for job `A` over job `B` is reversed. Formally, this is stated as:
+
+```math
+\Pr(A \mid \{A,B\}) > \Pr(B \mid \{A,B\})
+```
+
+```math
+\Pr(A \mid \{A,B,S\}) < \Pr(B \mid \{A,B,S\})
+```
+
+## Load Packages
+The first step is to load the required packages.
+
+```@example MDFT
+using SequentialSamplingModels
+using Plots
+using Random
+
+Random.seed!(8741)
+```
+## Create Model Object
+In the code below, we will define parameters for the MDFT and create a model object to store the parameter values.
+
+### Drift Rate Scalar
+In MDFT, the drift rate is determined by the contrast between alternatives along the attended attribute.
+These evaluations are scaled by the parameter $\gamma$:
+
+```@example MDFT
+γ = 1.0
+```
+### Threshold
+The threshold $\alpha$ represents the amount of evidence required to make a decision.
+```@example MDFT
+α = .50
+```
+
+### Dominance Weight
+In MDFT, alternatives are compared along the dominance dimension (diagonal) and indifference dimension (off-diagonal) in attribute space. The relative weight of the dominance dimension is controlled by the parameter $\beta$:
+```@example MDFT
+β = 10
+```
+
+### Lateral Inhibition
+
+In MDFT, alternatives inhibit each other as an inverse function of their distance in attribute space: the closer they are, the more inhibitory the relationship. Lateral inhibition is controlled via an alternative $\times$ alternative feedback matrix in which the diagonal elements (i.e., self-connections) represent decay or leakage, and the off-diagonal elements represent lateral inhibition between different alternatives. The values of the feedback matrix are controlled by a Gaussian distance function with two parameters: $\phi_1$ and $\phi_2$.
+
+#### Inhibition Strength
+
+The distance gradient parameter $\phi_1$ controls the strength of lateral inhibition between alternatives:
+
+```@example MDFT
+ϕ1 = .01
+```
+
+#### Maximum Inhibition
+
+Maximum inhibition and decay are controlled by the parameter $\phi_2$:
+
+```@example MDFT
+ϕ2 = .10
+```
+
+### Diffusion Noise
+Diffusion noise is the amount of within trial noise in the evidence accumulation process.
+```@example MDFT
+σ = .10
+```
+
+### Non-Decision Time
+
+Non-decision time is an additive constant representing encoding and motor response time.
+```@example MDFT
+τ = 0.30
+```
+### Attention Switching Rates
+The rate at which attention shifts from one attribute to the other is controlled by the following rate parameters:
+```@example MDFT
+κ = [6, 5]
+```
+The second rate is lower than the first so that attention dwells longer on the second dimension (i.e., salary).
+
+### MDFT Constructor
+
+Now that values have been assigned to the parameters, we will pass them to `MDFT` to generate the model object. We will begin with the choice between job `A` and job `B`.
+
+```@example MDFT
+dist = MDFT(;
+    n_alternatives = 2,
+    σ,
+    α,
+    τ,
+    γ,
+    κ,
+    ϕ1,
+    ϕ2,
+    β,
+)
+```
+## Simulate Model
+
+Now that the model is defined, we will generate 10,000 choices and reaction times using `rand`.
+
+```@example MDFT
+M₂ = [
+    1.0 3.0 # A
+    3.0 1.0 # B
+]
+
+choices,rts = rand(dist, 10_000, M₂; Δt = .001)
+probs2 = map(c -> mean(choices .== c), 1:2)
+```
+Here, we see that job `A` is preferred over job `B`. Also note that in the code block above, `rand` has a keyword argument `Δt` which controls the precision of the discrete approximation. The default value is `Δt = .001`.
+
+Next, we will simulate the choice between jobs `A`, `B`, and `S`.
+
+```@example MDFT
+dist = MDFT(;
+    n_alternatives = 3,
+    σ,
+    α,
+    τ,
+    γ,
+    κ,
+    ϕ1,
+    ϕ2,
+    β,
+)
+
+M₃ = [
+    1.0 3.0 # A
+    3.0 1.0 # B
+    0.9 3.1 # S
+]
+
+choices,rts = rand(dist, 10_000, M₃)
+probs3 = map(c -> mean(choices .== c), 1:3)
+```
+In this case, the preferences have reversed: job `B` is now preferred over job `A`.
+
+## Compute Choice Probability
+The choice probability $\Pr(C=c)$ is computed by passing the model and choice index to `cdf` along with a large value for time as the third argument. For MDFT, the value matrix is also passed as a fourth argument.
+
+```@example MDFT
+cdf(dist, 1, Inf, M₃)
+```
+
+## Plot Simulation
+The code below plots a histogram for each alternative.
+```@example MDFT
+histogram(dist; model_args = (M₃,))
+```
+# References
+
+Evans, N. J., Holmes, W. R., & Trueblood, J. S. (2019). Response-time data provide critical constraints on dynamic models of multi-alternative, multi-attribute choice. Psychonomic Bulletin & Review, 26, 901-933.
+
+Hotaling, J. M., Busemeyer, J. R., & Li, J. (2010). Theoretical developments in decision field theory: Comment on Tsetsos, Usher, and Chater (2010). Psychological Review, 117(4), 1294-1298.
+
+Roe, R. M., Busemeyer, J. R., & Townsend, J. T. (2001). Multialternative decision field theory: A dynamic connectionist model of decision making. Psychological Review, 108(2), 370-392.
\ No newline at end of file
diff --git a/docs/src/poisson_race.md b/docs/src/poisson_race.md
index 9ac14817..a9ba285b 100644
--- a/docs/src/poisson_race.md
+++ b/docs/src/poisson_race.md
@@ -71,12 +71,10 @@ logpdf.(dist, choices, rts)
 ```
 
 ## Compute Choice Probability
-The choice probability $\Pr(C=c)$ is computed by passing the model and choice index to `cdf`.
-
+The choice probability $\Pr(C=c)$ is computed by passing the model and choice index to `cdf` along with a large value for time as the third argument.
 ```@example poisson_race
-cdf(dist, 1)
+cdf(dist, 1, Inf)
 ```
-To compute the joint probability of choosing $c$ within $t$ seconds, i.e., $\Pr(T \leq t \wedge C=c)$, pass a third argument for $t$.
 
 ## Plot Simulation
 The code below overlays the PDF on reaction time histograms for each option.
diff --git a/docs/src/rdm.md b/docs/src/rdm.md
index 21c65cbe..65e65c26 100644
--- a/docs/src/rdm.md
+++ b/docs/src/rdm.md
@@ -79,12 +79,10 @@ logpdf.(dist, choices, rts)
 ```
 
 ## Compute Choice Probability
-The choice probability $\Pr(C=c)$ is computed by passing the model and choice index to `cdf`.
-
+The choice probability $\Pr(C=c)$ is computed by passing the model and choice index to `cdf` along with a large value for time as the third argument.
 ```@example rdm
-cdf(dist, 1)
+cdf(dist, 1, Inf)
 ```
-To compute the joint probability of choosing $c$ within $t$ seconds, i.e., $\Pr(T \leq t \wedge C=c)$, pass a third argument for $t$.
 
 ## Plot Simulation
 The code below overlays the PDF on reaction time histograms for each option.
diff --git a/docs/src/stDDM.md b/docs/src/stDDM.md
index 6ca7dd81..10b87a9c 100644
--- a/docs/src/stDDM.md
+++ b/docs/src/stDDM.md
@@ -74,11 +74,10 @@ Now that the model is defined, we will generate $10,000$ choices and reaction ti
 ```
 
 ## Compute Choice Probability
-The choice probability $\Pr(C=c)$ is computed by passing the model and choice index to `cdf`.
+The choice probability $\Pr(C=c)$ is computed by passing the model and choice index to `cdf` along with a large value for time as the third argument.
 ```@example stDDM
-cdf(dist, 1)
+cdf(dist, 1, Inf)
 ```
-To compute the joint probability of choosing $c$ within $t$ seconds, i.e., $\Pr(T \leq t \wedge C=c)$, pass a third argument for $t$.
 
 ## Plot Simulation
 The code below overlays the PDF on reaction time histograms for each option.
diff --git a/ext/plots/plot_model.jl b/ext/plots/plot_model.jl
index 69838eb1..e674562e 100644
--- a/ext/plots/plot_model.jl
+++ b/ext/plots/plot_model.jl
@@ -44,8 +44,10 @@ function plot_model(
     add_starting_point!(model, model_plot)
     α = compute_threshold(model)
     zs = Vector{Vector{Float64}}(undef, n_sim)
+    y_min = 0
     for i ∈ 1:n_sim
         time_range, evidence = simulate(model, model_args...; model_kwargs...)
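+        # track the lowest evidence value across simulations; used below to set the lower y-axis limit of the density overlay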
+        y_min = min(y_min, minimum(evidence))
         plot!(
             model_plot,
             time_range .+ model.τ,
@@ -68,6 +70,7 @@ function plot_model(
             model_args,
             model_kwargs,
             density_scale,
+            ylims = (y_min, Inf),
             density_kwargs...
         )
     end
@@ -369,6 +372,32 @@ function get_model_plot_defaults(d::AbstractLCA)
     )
 end
 
+"""
+    get_model_plot_defaults(d::MDFT)
+
+Returns default plot options
+
+# Arguments
+
+- `d::MDFT`: an object for multi-attribute decision field theory
+"""
+function get_model_plot_defaults(d::MDFT)
+    n_subplots = n_options(d)
+    title = ["choice $i" for _ ∈ 1:1, i ∈ 1:n_subplots]
+    return (
+        xaxis = nothing,
+        yaxis = nothing,
+        xticks = nothing,
+        yticks = nothing,
+        grid = false,
+        linewidth = 0.75,
+        color = :black,
+        leg = false,
+        title,
+        layout = (n_subplots, 1)
+    )
+end
+
 """
     get_model_plot_defaults(d::AbstractWald)
diff --git a/src/ClassicMDFT.jl b/src/ClassicMDFT.jl
new file mode 100644
index 00000000..3010070b
--- /dev/null
+++ b/src/ClassicMDFT.jl
@@ -0,0 +1,178 @@
+
+"""
+    ClassicMDFT{T <: Real} <: AbstractMDFT
+
+A model type for the classic, random walk version of Multiattribute Decision Field Theory.
+
+# Parameters
+- `σ = 1.0`: diffusion noise
+- `α = 15.0`: evidence threshold
+- `τ = .30`: non-decision time
+- `w::Vector{T}`: attention weights vector where each element corresponds to the attention given to the corresponding dimension
+- `S::Array{T, 2}`: feedback matrix allowing self-connections and interconnections between alternatives. Self-connections range from zero to 1, where s_ij < 1 represents decay. Interconnections between options i and j where i ≠ j are inhibitory if s_ij < 0.
+- `C::Array{T, 2}`: contrast weight matrix where c_ij is the contrast weight when comparing options i and j.
+
+# Constructors
+
+    ClassicMDFT(σ, α, τ, w, S, C)
+
+    ClassicMDFT(; σ = 1.0, α = 15.0, τ = 0.30, w, S, C = make_default_contrast(size(S, 1)))
+
+# Example
+
+An example of the similarity effect. When choosing between options 1 and 2, the model predicts equal preference
+because the options fall along the diagonal of attribute space, signifying a 1 to 1 trade-off of equally weighted
+attributes. Option 3 is introduced to the choice set, which is similar to (and competitive with) option 1 and dissimilar to option 2.
+In this case, the model predicts an increase in the choice probability for option 2 relative to option 1.
+```julia
+# value matrix where rows correspond to alternatives, and columns correspond to attributes
+M = [
+    1.0 3.0
+    3.0 1.0
+    0.9 3.1
+]
+
+model = ClassicMDFT(;
+    # non-decision time
+    τ = 0.300,
+    # diffusion noise
+    σ = 1.0,
+    # decision threshold
+    α = 17.5,
+    # attribute attention weights
+    w = [0.5, 0.5],
+    # feedback matrix
+    S = [
+        0.9500000 -0.0122316 -0.04999996
+        -0.0122316 0.9500000 -0.00903030
+        -0.0499996 -0.0090303 0.95000000
+    ],
+)
+choices, rts = rand(model, 10_000, M)
+map(c -> mean(choices .== c), 1:3)
+```
+# References
+
+Roe, R. M., Busemeyer, J. R., & Townsend, J. T. (2001). Multialternative decision field theory: A dynamic connectionist model of decision making. Psychological Review, 108(2), 370-392.
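+
+See also [`MDFT`](@ref) for a version of the model formulated as a stochastic differential equation.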
+""" +mutable struct ClassicMDFT{T <: Real} <: AbstractMDFT + σ::T + α::T + τ::T + w::Vector{T} + S::Array{T, 2} + C::Array{T, 2} +end + +function ClassicMDFT(σ, α, τ, w, S, C) + σ, α, τ, _, _, _ = promote(σ, α, τ, w[1], S[1], C[1]) + w = convert(Vector{typeof(τ)}, w) + S = convert(Array{typeof(τ), 2}, S) + C = convert(Array{typeof(τ), 2}, C) + return ClassicMDFT(σ, α, τ, w, S, C) +end + +function ClassicMDFT(; + σ = 1.0, + α = 15.0, + τ = 10.0, + w, + S, + C = make_default_contrast(size(S, 1)) +) + return ClassicMDFT(σ, α, τ, w, S, C) +end + +get_pdf_type(d::AbstractMDFT) = Approximate + +function params(d::ClassicMDFT) + return (d.σ, d.α, d.τ, d.w, d.S, d.C) +end + +""" + rand( + rng::AbstractRNG, + dist::AbstractMDFT, + n_sim::Int, + M::AbstractArray; + Δt = 0.001 + ) + +Generate `n_sim` random choice-rt pairs for the Multiattribute Decision Field Theory (MDFT). + +# Arguments + +- `rng::AbstractRNG`: a random number generator which is a subtype of `AbstractRNG` +- `dist::AbstractMDFT`: model object for the Multiattribute Decision Field Theory (MDFT). +- `n_sim::Int`: the number of simulated choice-rt pairs +- `M::AbstractArray`: an alternative × attribute value matrix representing the value of the stimuli + +# Keywords + +- `Δt = 0.001`: time step size +""" +function rand( + rng::AbstractRNG, + dist::ClassicMDFT, + n_sim::Int, + M::AbstractArray; +) + n_alternatives = size(M, 1) + x = fill(0.0, n_alternatives) + Δμ = fill(0.0, n_alternatives) + choices = fill(0, n_sim) + rts = fill(0.0, n_sim) + CM = dist.C * M + for i ∈ 1:n_sim + choices[i], rts[i] = _rand(rng, dist, x, Δμ, CM) + x .= 0.0 + end + return (; choices, rts) +end + +rand(dist::ClassicMDFT, M::AbstractArray;) = rand(Random.default_rng(), dist, M) + +rand(dist::ClassicMDFT, n_sim::Int, M::AbstractArray) = + rand(Random.default_rng(), dist, n_sim, M) + +function rand(rng::AbstractRNG, dist::ClassicMDFT, M::AbstractArray) + n_alternatives = size(M, 1) + # evidence for each alternative + x = fill(0.0, n_alternatives) + # mean change in evidence for each alternative + Δμ = fill(0.0, n_alternatives) + # noise for each alternative + ϵ = fill(0.0, n_alternatives) + # precompute matric multiplication + CM = dist.C * M + return _rand(rng, dist, x, Δμ, CM) +end + +function _rand(rng::AbstractRNG, dist::ClassicMDFT, x, Δμ, CM) + (; α, τ) = dist + t = 0.0 + while all(x .< α) + increment!(rng, dist, x, Δμ, CM) + t += 1 + end + _, choice = findmax(x) + rt = t + τ + return (; choice, rt) +end + +function increment!(rng::AbstractRNG, dist::ClassicMDFT, x, Δμ, CM) + (; σ, w, S, C) = dist + n_alternatives, n_attributes = size(CM) + att_idx = sample(1:n_attributes, Weights(w)) + v = @view CM[:, att_idx] + compute_mean_evidence!(dist, x, Δμ, v) + x .= Δμ .+ C * rand(rng, Normal(0, σ), n_alternatives) + return nothing +end + +function compute_mean_evidence!(dist::AbstractMDFT, x, Δμ, v) + (; S) = dist + Δμ .= S * x .+ v + return nothing +end diff --git a/src/LCA.jl b/src/LCA.jl index 2e2e5059..24c89972 100644 --- a/src/LCA.jl +++ b/src/LCA.jl @@ -20,7 +20,7 @@ Two constructors are defined below. 
 The second constructor uses keywords with default values, and is not order dependent:
 
-    LCA(; ν = [2.5, 2.0], α = 1.5, β = 0.20, λ = 0.10, τ = 0.30, σ = 1.0)
+    LCA(; ν = [2.5, 2.0], σ = 1.0, β = 0.20, λ = 0.10, α = 1.5, τ = 0.30)
 
 # Example
@@ -33,7 +33,7 @@ using SequentialSamplingModels
 σ = 1.0
 τ = 0.30
 
-dist = LCA(; ν, α, β, λ, τ, σ, Δt)
+dist = LCA(; ν, α, β, λ, τ, σ)
 choices,rts = rand(dist, 500)
 ```
 # References
@@ -55,7 +55,7 @@ function LCA(ν, σ, β, λ, α, τ)
     return LCA(ν, σ, β, λ, α, τ)
 end
 
-function LCA(; ν = [2.5, 2.0], α = 1.5, β = 0.20, λ = 0.10, τ = 0.30, σ = 1.0)
+function LCA(; ν = [2.5, 2.0], σ = 1.0, β = 0.20, λ = 0.10, α = 1.5, τ = 0.30)
     return LCA(ν, σ, β, λ, α, τ)
 end
@@ -75,15 +75,13 @@ Generate a random choice-rt pair for the Leaky Competing Accumulator.
 
 - `Δt = 0.001`: time step size
 """
 function rand(rng::AbstractRNG, dist::AbstractLCA; Δt = 0.001)
-    # number of trials
+    # number of choices
     n = length(dist.ν)
     # evidence for each alternative
     x = fill(0.0, n)
     # mean change in evidence for each alternative
     Δμ = fill(0.0, n)
-    # noise for each alternative
-    ϵ = fill(0.0, n)
-    return simulate_trial(rng, dist, x, Δμ, ϵ; Δt)
+    return _rand(rng, dist, x, Δμ; Δt)
 end
 
 """
@@ -103,21 +101,20 @@ function rand(rng::AbstractRNG, dist::AbstractLCA, n_sim::Int; Δt = 0.001)
     n = length(dist.ν)
     x = fill(0.0, n)
     Δμ = fill(0.0, n)
-    ϵ = fill(0.0, n)
     choices = fill(0, n_sim)
     rts = fill(0.0, n_sim)
-    for i = 1:n_sim
-        choices[i], rts[i] = simulate_trial(rng, dist, x, Δμ, ϵ; Δt)
+    for i ∈ 1:n_sim
+        choices[i], rts[i] = _rand(rng, dist, x, Δμ; Δt)
         x .= 0.0
     end
     return (; choices, rts)
 end
 
-function simulate_trial(rng::AbstractRNG, dist, x, Δμ, ϵ; Δt = 0.001)
+function _rand(rng::AbstractRNG, dist::AbstractLCA, x, Δμ; Δt = 0.001)
     (; α, τ) = dist
     t = 0.0
     while all(x .< α)
-        increment!(rng, dist, x, Δμ, ϵ; Δt)
+        increment!(rng, dist, x, Δμ; Δt)
        t += Δt
     end
     _, choice = findmax(x)
@@ -125,31 +122,23 @@
     return (; choice, rt)
 end
 
-increment!(ν, β, λ, σ, Δt, x, Δμ, ϵ) =
-    increment!(Random.default_rng(), ν, β, λ, σ, Δt, x, Δμ, ϵ)
-
-function increment!(rng::AbstractRNG, ν, β, λ, σ, Δt, x, Δμ, ϵ)
+function increment!(rng::AbstractRNG, dist::AbstractLCA, x, Δμ; Δt = 0.001)
+    (; ν, σ) = dist
     n = length(ν)
     # compute change of mean evidence: νᵢ - λxᵢ - βΣⱼxⱼ
-    compute_mean_evidence!(ν, β, λ, x, Δμ)
-    # sample noise
-    ϵ .= rand(rng, Normal(0, σ), n)
+    compute_mean_evidence!(dist, x, Δμ)
     # add mean change in evidence plus noise
-    x .+= Δμ * Δt .+ ϵ * √(Δt)
+    x .+= Δμ * Δt .+ rand(rng, Normal(0, σ * √(Δt)), n)
     # ensure that evidence is non-negative
     x .= max.(x, 0.0)
     return nothing
 end
 
-increment!(dist, x, Δμ, ϵ; Δt = 0.001) =
-    increment!(Random.default_rng(), dist, x, Δμ, ϵ; Δt)
-
-function increment!(rng::AbstractRNG, dist, x, Δμ, ϵ; Δt = 0.001)
-    (; ν, β, λ, σ) = dist
-    return increment!(rng, ν, β, λ, σ, Δt, x, Δμ, ϵ)
-end
+increment!(dist, x, Δμ; Δt = 0.001) =
+    increment!(Random.default_rng(), dist, x, Δμ; Δt)
 
-function compute_mean_evidence!(ν, β, λ, x, Δμ)
+function compute_mean_evidence!(dist::AbstractLCA, x, Δμ)
+    (; ν, β, λ) = dist
     for i = 1:length(ν)
         Δμ[i] = ν[i] - λ * x[i] - β * inhibit(x, i)
     end
@@ -179,13 +168,12 @@ function simulate(model::AbstractLCA; Δt = 0.001, _...)
     n = length(model.ν)
     x = fill(0.0, n)
     μΔ = fill(0.0, n)
-    ϵ = fill(0.0, n)
     t = 0.0
     evidence = [fill(0.0, n)]
     time_steps = [t]
     while all(x .< α)
         t += Δt
-        increment!(model, x, μΔ, ϵ; Δt)
+        increment!(model, x, μΔ; Δt)
         push!(evidence, copy(x))
         push!(time_steps, t)
     end
diff --git a/src/MDFT.jl b/src/MDFT.jl
new file mode 100644
index 00000000..a7d197f0
--- /dev/null
+++ b/src/MDFT.jl
@@ -0,0 +1,371 @@
+"""
+    MDFT{T <: Real} <: AbstractMDFT
+
+A model type for simulating Multi-attribute Decision Field Theory (MDFT) as a Stochastic Differential Equation (SDE).
+
+# Parameters
+- `σ = 1.0`: diffusion noise
+- `α = 15.0`: evidence threshold
+- `τ = .30`: non-decision time
+- `γ::T`: scales the valence, `CMW`, functioning like a drift rate
+- `κ::Vector{T}`: exponential rate parameters for switching attention between attributes. Currently, limited to two attributes
+- `ϕ1`: controls the sensitivity of lateral inhibition to distance in the distance function for creating the feedback matrix, `S`
+- `ϕ2`: controls evidence decay and maximum inhibition in the distance function for creating the feedback matrix, `S`
+- `β`: controls the weight of the dominance dimension in the feedback matrix distance function. If `β` < 0, the indifference dimension receives more weight. If `β` > 0, the dominance dimension receives more weight
+- `S::Array{T, 2}`: feedback matrix allowing self-connections and interconnections between alternatives. Self-connections range from zero to 1, where s_ij < 1 represents decay. Interconnections between options i and j where i ≠ j are inhibitory if s_ij < 0.
+- `C::Array{T, 2}`: contrast weight matrix for comparing attended alternative to other alternatives. The element c_ij is the contrast weight when comparing options i and j.
+
+# Constructors
+
+    MDFT(σ, α, τ, γ, κ, ϕ1, ϕ2, β, C)
+
+    MDFT(;
+        n_alternatives,
+        σ,
+        α,
+        τ,
+        γ,
+        κ,
+        ϕ1,
+        ϕ2,
+        β,
+        C = make_default_contrast(n_alternatives)
+    )
+
+# Example
+
+An example of the similarity effect. When choosing between options 1 and 2, the model predicts equal preference
+because the options fall along the diagonal of attribute space, signifying a 1 to 1 trade-off of equally weighted
+attributes. Option 3 is introduced to the choice set, which is similar to (and competitive with) option 1 and dissimilar to option 2.
+In this case, the model predicts a reversal of preference between options 1 and 2.
+```julia
+using SequentialSamplingModels
+
+model = MDFT(;
+    n_alternatives = 3,
+    σ = 0.1,
+    α = .50,
+    τ = 0.0,
+    γ = 1.0,
+    κ = [6.0, 5.0],
+    ϕ1 = 0.01,
+    ϕ2 = 0.10,
+    β = 10.0
+)
+# value matrix where rows correspond to alternatives, and columns correspond to attributes
+M = [
+    1.0 3.0
+    3.0 1.0
+    0.9 3.1
+]
+
+choices, rts = rand(model, 10_000, M)
+probs = map(c -> mean(choices .== c), 1:3)
+```
+# References
+
+Evans, N. J., Holmes, W. R., & Trueblood, J. S. (2019). Response-time data provide critical constraints on dynamic models of multi-alternative, multi-attribute choice. Psychonomic Bulletin & Review, 26, 901-933.
+
+Hotaling, J. M., Busemeyer, J. R., & Li, J. (2010). Theoretical developments in decision field theory: Comment on Tsetsos, Usher, and Chater (2010). Psychological Review, 117(4), 1294-1298.
+
+Roe, R. M., Busemeyer, J. R., & Townsend, J. T. (2001). Multialternative decision field theory: A dynamic connectionist model of decision making. Psychological Review, 108(2), 370-392.
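+
+See also [`ClassicMDFT`](@ref) for the original random walk version of the model.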
+""" +mutable struct MDFT{T <: Real} <: AbstractMDFT + σ::T + α::T + τ::T + γ::T + κ::Vector{T} + ϕ1::T + ϕ2::T + β::T + S::Array{T, 2} + C::Array{T, 2} + _CM::Array{T, 2} + _att_idx::Int +end + +function MDFT(σ, α, τ, γ, κ, ϕ1, ϕ2, β, C) + σ, α, τ, γ, _, ϕ1, ϕ2, β, = promote(σ, α, τ, γ, κ[1], ϕ1, ϕ2, β) + κ = convert(Vector{typeof(τ)}, κ) + C = convert(Array{typeof(τ), 2}, C) + _CM = zeros(size(C, 1), length(κ)) + S = similar(C) + return MDFT(σ, α, τ, γ, κ, ϕ1, ϕ2, β, S, C, _CM, 0) +end + +function MDFT(; + n_alternatives, + σ, + α, + τ, + γ, + κ, + ϕ1, + ϕ2, + β, + C = make_default_contrast(n_alternatives) +) + return MDFT(σ, α, τ, γ, κ, ϕ1, ϕ2, β, C) +end + +function params(d::MDFT) + return (d.σ, d.α, d.τ, d.γ, d.κ, d.ϕ1, d.ϕ2, d.β, d.C) +end + +n_options(d::AbstractMDFT) = size(d.C, 1) + +""" + rand( + rng::AbstractRNG, + dist::MDFT, + n_sim::Int, + M::AbstractArray; + Δt = 0.001 + ) + +Generate `n_sim` random choice-rt pairs for the Multi-attribute Decision Field Theory (MDFT). + +# Arguments + +- `rng::AbstractRNG`: a random number generator which is a subtype of `AbstractRNG` +- `dist::MDFT`: model object for the Multi-attribute Decision Field Theory (MDFT). +- `n_sim::Int`: the number of simulated choice-rt pairs +- `M::AbstractArray`: an alternative × attribute value matrix representing the value of the stimuli + +# Keywords + +- `Δt = 0.001`: time step size +""" +function rand( + rng::AbstractRNG, + dist::MDFT, + n_sim::Int, + M::AbstractArray; + Δt = 0.001 +) + n_options = size(M, 1) + x = fill(0.0, n_options) + Δμ = fill(0.0, n_options) + ϵ = fill(0.0, n_options) + choices = fill(0, n_sim) + rts = fill(0.0, n_sim) + dist._CM = dist.C * M * dist.γ + distances = compute_distances(dist, M) + dist.S = compute_feedback_matrix(dist, distances) + for i ∈ 1:n_sim + choices[i], rts[i] = _rand(rng, dist, x, Δμ; Δt) + x .= 0.0 + end + return (; choices, rts) +end + +rand(dist::MDFT, M::AbstractArray; Δt = 0.001) = rand(Random.default_rng(), dist, M; Δt) + +rand(dist::MDFT, n_sim::Int, M::AbstractArray; Δt = 0.001) = + rand(Random.default_rng(), dist, n_sim, M; Δt) + +function rand(rng::AbstractRNG, dist::MDFT, M::AbstractArray; Δt = 0.001) + (; _CM, C, γ) = dist + n_options = size(M, 1) + # evidence for each alternative + x = fill(0.0, n_options) + # mean change in evidence for each alternative + Δμ = fill(0.0, n_options) + # precompute matric multiplication + _CM .= C * M * γ + return _rand(rng, dist, x, Δμ; Δt) +end + +function _rand(rng::AbstractRNG, dist::MDFT, x, Δμ; Δt = 0.001) + (; α, τ) = dist + t = 0.0 + dist._att_idx = rand(1:2) + while all(x .< α) + increment!(rng, dist, x, Δμ; Δt) + t += Δt + end + _, choice = findmax(x) + rt = t + τ + return (; choice, rt) +end + +""" + increment!(rng::AbstractRNG, dist::MDFT, x, Δμ; Δt) + +Increments the preference states `x` on each time step. + +# Arguments + +- `rng::AbstractRNG`: a random number generator which is a subtype of `AbstractRNG` +- `dist::AbstractMDFT`: model object for the Multi-attribute Decision Field Theory (MDFT). 
+- `x`: a vector of preference states
+- `Δμ`: a vector of mean change in the preference states
+
+# Keywords
+
+- `Δt = 0.001`: time step size
+"""
+function increment!(rng::AbstractRNG, dist::MDFT, x, Δμ; Δt)
+    (; σ, _CM) = dist
+    n_options = size(_CM, 1)
+    att_idx = update_attention(dist; Δt)
+    dist._att_idx = att_idx
+    v = @view _CM[:, att_idx]
+    compute_mean_evidence!(dist, x, Δμ, v)
+    x .+= Δμ * Δt .+ rand(rng, Normal(0, σ * √Δt), n_options)
+    return nothing
+end
+
+increment!(dist::MDFT, x, Δμ; Δt) = increment!(Random.default_rng(), dist, x, Δμ; Δt)
+
+"""
+    make_default_contrast(n)
+
+Creates an alternative × alternative contrast matrix representing comparisons between alternatives.
+The contrast matrix has the following properties:
+
+1. The diagonal values are 1
+2. The rows sum to 0
+3. The off-diagonal values are equal
+
+# Arguments
+
+- `n`: the number of alternatives in the `M` matrix
+
+# Example
+
+```julia
+make_default_contrast(3)
+3×3 Matrix{Float64}:
+  1.0  -0.5  -0.5
+ -0.5   1.0  -0.5
+ -0.5  -0.5   1.0
+```
+"""
+function make_default_contrast(n)
+    C = fill(0.0, n, n)
+    C .= -1 / (n - 1)
+    for r ∈ 1:n
+        C[r, r] = 1.0
+    end
+    return C
+end
+
+"""
+    update_attention(dist::MDFT; Δt)
+
+Switches attention to a different attribute based on an exponential waiting time.
+
+# Arguments
+
+- `dist::MDFT`: a model object for simulating MDFT
+
+# Keywords
+
+- `Δt`: duration of time step
+"""
+function update_attention(dist::MDFT; Δt)
+    (; κ, _att_idx) = dist
+    if rand() ≤ prob_switch(κ[_att_idx], Δt)
+        return _att_idx == 1 ? 2 : 1
+    end
+    return _att_idx
+end
+
+"""
+    compute_distances(dist::MDFT, M)
+
+Computes the distances between stimuli in terms of the dominance and indifference dimensions on a unit plane.
+
+# Arguments
+
+- `dist::MDFT`: a model object for simulating MDFT
+- `M`: an alternative × attribute value matrix representing the value of the stimuli
+
+# References
+
+Hotaling, J. M., Busemeyer, J. R., & Li, J. (2010). Theoretical developments in decision field theory: Comment on Tsetsos, Usher, and Chater (2010). Psychological Review, 117(4), 1294-1298.
+"""
+function compute_distances(dist::MDFT, M)
+    (; β) = dist
+    # number of alternatives
+    n = size(M, 1)
+    D = fill(0.0, n, n)
+    for i ∈ 1:n
+        for j ∈ (i + 1):n
+            Δ1, Δ2 = (M[i, 1] - M[j, 1]) / √(2), (M[i, 2] - M[j, 2]) / √(2)
+            D[i, j] = D[j, i] = (Δ2 - Δ1)^2 + β * (Δ2 + Δ1)^2
+        end
+    end
+    return D
+end
+
+"""
+    compute_feedback_matrix(dist::MDFT, D)
+
+Computes the feedback matrix `S` for Multi-attribute Decision Field Theory (MDFT). The strength of the inhibitory connections between alternatives decreases with their distance in attribute space.
+
+# Arguments
+
+- `dist::MDFT`: a model object for simulating MDFT
+- `D`: alternative × alternative distance matrix computed by `compute_distances`
+
+# References
+
+Hotaling, J. M., Busemeyer, J. R., & Li, J. (2010). Theoretical developments in decision field theory: Comment on Tsetsos, Usher, and Chater (2010). Psychological Review, 117(4), 1294-1298.
+"""
+function compute_feedback_matrix(dist::MDFT, D)
+    (; ϕ1, ϕ2) = dist
+    n = size(D, 1)
+    S = fill(0.0, n, n)
+    for i ∈ 1:n
+        for j ∈ i:n
+            δ = i == j ? 1 : 0
+            S[i, j] = S[j, i] = δ - ϕ2 * exp(-ϕ1 * D[i, j]^2)
+        end
+    end
+    return S
+end
+
+prob_switch(κ, Δt) = 1 - exp(-κ * Δt)
+
+"""
+    simulate(model::MDFT, M::AbstractArray; Δt = 0.001, _...)
+
+Returns a vector of time steps and a matrix containing evidence samples of the MDFT decision process. In the matrix, rows represent samples of evidence per time step and columns represent different accumulators.
+
+# Arguments
+
+- `model::MDFT`: an MDFT model object
+- `M::AbstractArray`: an alternative × attribute value matrix representing the value of the stimuli
+"""
+function simulate(model::MDFT, M::AbstractArray; Δt = 0.001, _...)
+    (; α, C, γ, _CM) = model
+    n = size(M, 1)
+    x = fill(0.0, n)
+    μΔ = fill(0.0, n)
+    t = 0.0
+    _CM .= C * M * γ
+    model._att_idx = rand(1:2)
+    distances = compute_distances(model, M)
+    model.S = compute_feedback_matrix(model, distances)
+    evidence = [fill(0.0, n)]
+    time_steps = [t]
+    while all(x .< α)
+        t += Δt
+        increment!(model, x, μΔ; Δt)
+        push!(evidence, copy(x))
+        push!(time_steps, t)
+    end
+    return time_steps, reduce(vcat, transpose.(evidence))
+end
diff --git a/src/RDM.jl b/src/RDM.jl
index 5bb93213..645e830f 100644
--- a/src/RDM.jl
+++ b/src/RDM.jl
@@ -192,14 +192,14 @@ function simulate(rng::AbstractRNG, model::AbstractRDM; Δt = 0.001)
     time_steps = [t]
     while all(x .< α)
         t += Δt
-        increment!(rng, model, x, ϵ, ν, Δt)
+        increment!(rng, model, x, ϵ, ν; Δt)
         push!(evidence, deepcopy(x))
         push!(time_steps, t)
     end
     return time_steps, reduce(vcat, transpose.(evidence))
 end
 
-function increment!(rng::AbstractRNG, model::AbstractRDM, x, ϵ, ν, Δt)
+function increment!(rng::AbstractRNG, model::AbstractRDM, x, ϵ, ν; Δt)
     ϵ .= rand(rng, Normal(0.0, 1.0), length(ν))
     x .+= ν * Δt + ϵ * √(Δt)
     return nothing
diff --git a/src/SequentialSamplingModels.jl b/src/SequentialSamplingModels.jl
index 2b5c4498..a4e53b1f 100644
--- a/src/SequentialSamplingModels.jl
+++ b/src/SequentialSamplingModels.jl
@@ -11,7 +11,9 @@ using FunctionZeros
 using PrettyTables
 using Random
 using SpecialFunctions
+
 using HCubature: hcubature
+using StatsBase: Weights
 
 import Base: length
 import Distributions: AbstractRNG
@@ -36,11 +38,13 @@ export AbstractCDDM
 export AbstractLBA
 export AbstractLCA
 export AbstractLNR
+export AbstractMDFT
 export AbstractPoissonRace
 export AbstractRDM
 export AbstractstDDM
 export AbstractWald
 export aDDM
+export ClassicMDFT
 export CDDM
 export DDM
 export ExGaussian
@@ -49,6 +53,7 @@ export LBA
 export LCA
 export LNR
 export maaDDM
+export MDFT
 export PoissonRace
 export SSM1D
 export SSM2D
@@ -96,4 +101,6 @@ include("ext_functions.jl")
 include("ex_gaussian.jl")
 include("poisson_race.jl")
 include("stDDM.jl")
+include("MDFT.jl")
+include("ClassicMDFT.jl")
 end
diff --git a/src/type_system.jl b/src/type_system.jl
index b4610721..ba06e964 100644
--- a/src/type_system.jl
+++ b/src/type_system.jl
@@ -65,6 +65,11 @@
 An abstract type for the leaky competing accumulator model
 """
 abstract type AbstractLCA <: SSM2D end
 
+"""
+    AbstractMDFT <: SSM2D
+
+An abstract type for Multi-attribute Decision Field Theory models.
+"""
+abstract type AbstractMDFT <: SSM2D end
+
 """
     AbstractPoissonRace <: SSM2D
 
 An abstract type for the Poisson race model
@@ -102,6 +107,10 @@ Has approximate PDF based on kernel density estimator.
 """
 struct Approximate <: PDFType end
 
+get_simulator_type(d::SSM1D) = Exact
+get_simulator_type(d::SSM2D) = Exact
+get_simulator_type(d::ContinuousMultivariateSSM) = Exact
+
 get_pdf_type(d::SSM1D) = Exact
 get_pdf_type(d::SSM2D) = Exact
 get_pdf_type(d::ContinuousMultivariateSSM) = Exact
@@ -204,23 +213,31 @@ available for a given model.
 
 # Arguments
 
 - `d::SSM2D`: a 2D sequential sampling model.
 - `choice::Int`: the number of simulated choices and rts
-- `ub=10`: upper bound of integration
+- `ub::Real`: upper bound of integration
+- `args...`: optional arguments passed to `pdf` or `rand`
 """
-function cdf(d::SSM2D, choice::Int, ub = 10)
-    return cdf(get_pdf_type(d), d, choice, ub)
+function cdf(d::SSM2D, choice::Int, ub::Real, args...)
+    return cdf(get_pdf_type(d), d, choice, ub, args...)
 end
 
-function cdf(::Type{<:Exact}, d::SSM2D, choice::Int, ub = 10)
-    return hcubature(t -> pdf(d, choice, t[1]), [d.τ], [ub])[1]::Float64
+function cdf(::Type{<:Exact}, d::SSM2D, choice::Int, ub::Real, args...)
+    return hcubature(t -> pdf(d, choice, t[1], args...), [d.τ], [ub])[1]::Float64
 end
 
-function cdf(::Type{<:Approximate}, d::SSM2D, choice::Int, ub = 10; n_sim = 10_000)
-    c, rt = rand(d, n_sim)
+function cdf(
+    ::Type{<:Approximate},
+    d::SSM2D,
+    choice::Int,
+    ub::Real,
+    args...;
+    n_sim = 10_000
+)
+    c, rt = rand(d, n_sim, args...)
     return mean(c .== choice .&& rt .≤ ub)
 end
 
-function survivor(d::SSM2D, choice::Int, ub = 10)
-    return 1 - cdf(d, choice, ub)
+function survivor(d::SSM2D, choice::Int, ub::Real, args...)
+    return 1 - cdf(d, choice, ub, args...)
 end
 
 """
diff --git a/test/lca_tests.jl b/test/lca_tests.jl
index 882a1de6..fa6294ec 100644
--- a/test/lca_tests.jl
+++ b/test/lca_tests.jl
@@ -62,33 +62,32 @@
     using SequentialSamplingModels: compute_mean_evidence!
     using Test
 
-    β = 0.20
-    λ = 0.10
-    ν = [2.5, 2.0]
+    parms = (α = 1.5, β = 0.20, λ = 0.10, ν = [2.5, 2.0], τ = 0.30, σ = 1.0)
+
+    model = LCA(; parms...)
+
     Δμ = [0.0, 0.0]
     x = [1.0, 2.0]
 
-    compute_mean_evidence!(ν, β, λ, x, Δμ)
+    compute_mean_evidence!(model, x, Δμ)
     @test Δμ[1] ≈ (2.5 - 0.1 - 0.4)
     @test Δμ[2] ≈ (2.0 - 0.2 - 0.2)
 
-    β = 0.00
-    λ = 0.00
-    ν = [2.5, 2.0]
+    parms = (α = 1.5, β = 0.00, λ = 0.00, ν = [2.5, 2.0], τ = 0.30, σ = 1.0)
+    model = LCA(; parms...)
     Δμ = [0.0, 0.0]
     x = [1.0, 2.0]
 
-    compute_mean_evidence!(ν, β, λ, x, Δμ)
+    compute_mean_evidence!(model, x, Δμ)
    @test Δμ[1] ≈ 2.5
     @test Δμ[2] ≈ 2.0
 
-    β = 0.20
-    λ = 0.10
-    ν = [0.0, 0.0]
+    parms = (α = 1.5, β = 0.20, λ = 0.10, ν = [0.0, 0.0], τ = 0.30, σ = 1.0)
+    model = LCA(; parms...)
     Δμ = [0.0, 0.0]
     x = [1.0, 2.0]
 
-    compute_mean_evidence!(ν, β, λ, x, Δμ)
+    compute_mean_evidence!(model, x, Δμ)
     @test Δμ[1] ≈ (-0.1 - 0.4)
     @test Δμ[2] ≈ (-0.2 - 0.2)
 end
@@ -101,23 +100,25 @@
     Random.seed!(6521)
 
     Δt = 0.001
-    β = 0.20
-    λ = 0.10
-    σ = 0.10
-    ν = [2.5, 2.0]
+    model = LCA(;
+        ν = [2.5, 2.0],
+        β = 0.20,
+        λ = 0.10,
+        σ = 0.10
+    )
+
     Δμ = [0.0, 0.0]
     x = [0.0, 0.0]
-    ϵ = [0.0, 0.0]
 
     n_reps = 1000
     evidence = fill(0.0, n_reps, 2)
     for i ∈ 1:n_reps
         x .= 1.0
-        increment!(ν, β, λ, σ, Δt, x, Δμ, ϵ)
+        increment!(model, x, Δμ; Δt)
         evidence[i, :] = x
     end
 
-    true_std = σ * sqrt(Δt)
+    true_std = model.σ * sqrt(Δt)
     true_means = [(2.5 - 0.1 - 0.4) (2.0 - 0.2 - 0.2)] * Δt .+ 1.0
 
     @test mean(evidence, dims = 1) ≈ true_means atol = 5e-4
diff --git a/test/mdft_test_functions.jl b/test/mdft_test_functions.jl
new file mode 100644
index 00000000..83d401b5
--- /dev/null
+++ b/test/mdft_test_functions.jl
@@ -0,0 +1,77 @@
+function test_context_effect(parms, rand_parms, M; test_func, n_sim = 1000)
+    probs2, probs3 = simulate_context_effect(parms, rand_parms, M; n_sim)
+    return test_func(probs2, probs3)
+end
+
+function simulate_context_effect(parms, rand_parms, M; n_sim = 1000)
+    n_alternatives = 2
+    model = MDFT(; n_alternatives, parms..., rand_parms...)
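+    # baseline: restrict the value matrix to the first two alternatives to obtain binary choice probabilities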
+    M2 = M[1:n_alternatives, :]
+    choices, _ = rand(model, n_sim, M2)
+    probs2 = map(c -> mean(choices .== c), 1:n_alternatives)
+
+    n_alternatives = 3
+    model = MDFT(; n_alternatives, parms..., rand_parms...)
+    choices, _ = rand(model, n_sim, M)
+    probs3 = map(c -> mean(choices .== c), 1:n_alternatives)
+    return probs2, probs3
+end
+
+test_attraction(probs2, probs3) = (probs3[1] - probs2[2]) ≥ 0.005
+test_compromise(probs2, probs3) =
+    isapprox(probs2..., atol = 0.005) && ((probs3[2] - probs3[1]) ≥ 0.005) &&
+    ((probs3[2] - probs3[3]) ≥ 0.005)
+test_similarity(probs2, probs3) =
+    ((probs2[1] - probs2[2]) ≥ 0.005) && ((probs3[2] - probs3[1]) ≥ 0.005)
+
+function run_tests(parms; Ma, Ms, Mc)
+    rand_parms = (
+        ϕ1 = round(rand(Uniform(0.5, 1)), digits = 2),
+        ϕ2 = round(rand(Uniform(0, 2)), digits = 2),
+        β = round(rand(Uniform(5, 15)), digits = 2),
+        κ = round.(fill(rand(Uniform(1, 20)), 2), digits = 2)
+    )
+    attraction = test_context_effect(parms, rand_parms, Ma; test_func = test_attraction)
+    compromise = test_context_effect(parms, rand_parms, Mc; test_func = test_compromise)
+    similarity = test_context_effect(parms, rand_parms, Ms; test_func = test_similarity)
+    return (; attraction, compromise, similarity, rand_parms...)
+end
+
+# parms = (
+#     σ = 0.1,
+#     α = 0.50,
+#     τ = 0.0,
+#     γ = 1.0,
+#     κ = [5.0, 5.0],
+#     ϕ1 = 0.01,
+#     ϕ2 = 5,
+#     β = 10.0
+# )
+# # stimuli for attraction effect
+# Ma = [
+#     3.0 1.0
+#     1.0 3.0
+#     0.50 2.5
+# ]
+
+# # stimuli for compromise effect
+# Mc = [
+#     1.0 3.0
+#     2.0 2.0
+#     3.0 1.0
+# ]
+
+# # stimuli for similarity effect
+# Ms = [
+#     1.0 3.0
+#     3.0 1.0
+#     0.9 3.1
+# ]
+
+# results = map(i -> begin
+#     output = run_tests(parms; Ma, Ms, Mc)
+#     println("i $i $output")
+#     return output
+# end, 1:100)
+
+# filter(x -> x.attraction && x.compromise && x.similarity, results)
diff --git a/test/mdft_tests.jl b/test/mdft_tests.jl
new file mode 100644
index 00000000..1b821408
--- /dev/null
+++ b/test/mdft_tests.jl
@@ -0,0 +1,305 @@
+@safetestset "ClassicMDFT tests" begin
+    @safetestset "similarity effect" begin
+        using SequentialSamplingModels
+        using Random
+        using Test
+
+        Random.seed!(5484)
+        # non-decision time
+        τ = 0.300
+        # diffusion noise
+        σ = 1.0
+        # decision threshold
+        α = 17.5
+        # attribute attention weights
+        w = [0.5, 0.5]
+        # value matrix where rows correspond to alternatives, and columns correspond to attributes
+        M = [
+            1.0 3.0
+            3.0 1.0
+            0.9 3.1
+        ]
+        # feedback matrix
+        S = [
+            0.9500000 -0.0122316 -0.04999996
+            -0.0122316 0.9500000 -0.00903030
+            -0.0499996 -0.0090303 0.95000000
+        ]
+
+        model = ClassicMDFT(; σ, α, τ, w, S)
+        choices, _ = rand(model, 100_000, M)
+        probs = map(c -> mean(choices .== c), 1:3)
+        ground_truth = [0.305315, 0.395226, 0.299459]
+        @test probs ≈ ground_truth atol = 5e-3
+    end
+
+    @safetestset "compromise effect" begin
+        using SequentialSamplingModels
+        using Random
+        using Test
+
+        Random.seed!(6541)
+        # non-decision time
+        τ = 0.300
+        # diffusion noise
+        σ = 1.0
+        # decision threshold
+        α = 17.5
+        # attribute attention weights
+        w = [0.5, 0.5]
+        # value matrix where rows correspond to alternatives, and columns correspond to attributes
+        M = [
+            1.0 3.0
+            3.0 1.0
+            2.0 2.0
+        ]
+        # feedback matrix
+        S = [
+            0.950000 -0.012232 -0.045788
+            -0.012232 0.950000 -0.045788
+            -0.045788 -0.045788 0.950000
+        ]
+
+        model = ClassicMDFT(; σ, α, τ, w, S)
+        choices, _ = rand(model, 100_000, M)
+        probs = map(c -> mean(choices .== c), 1:3)
+        ground_truth = [0.282626, 0.284605, 0.432769]
+        @test probs ≈ ground_truth atol = 5e-3
+    end
+
"attraction effect" begin + using SequentialSamplingModels + using Random + using Test + + Random.seed!(201) + # non-decision time + τ = 0.300 + # diffusion noise + σ = 1.0 + # decision threshold + α = 17.5 + # attribute attention weights + w = [0.5, 0.5] + # value matrix where rows correspond to attributes, and columns correspond to options + M = [ + 1.0 3.0 + 3.0 1.0 + 0.50 2.5 + ] + # feedback matrix + S = [ + 0.950000 -0.01223200 -0.02264700 + -0.012232 0.95000000 -0.00067034 + -0.022647 -0.00067034 0.95000000 + ] + + model = ClassicMDFT(; σ, α, τ, w, S) + choices, _ = rand(model, 100_000, M) + probs = map(c -> mean(choices .== c), 1:3) + ground_truth = [0.559048, 0.440950, 0.000002] + @test probs ≈ ground_truth atol = 5e-3 + end +end + +@safetestset "MDFT" begin + @safetestset "compute_distances" begin + using SequentialSamplingModels + using SequentialSamplingModels: compute_distances + using Random + using Test + + model = MDFT(; + n_alternatives = 3, + σ = 1.0, + α = 1.0, + τ = 0.30, + γ = 1.0, + κ = [0.1, 0.2], + ϕ1 = 0.01, + ϕ2 = 0.10, + β = 10.0 + ) + + M = [ + 1 3 + 2 2 + 0 2 + ] + distances = compute_distances(model, M) + ground_truth = [ + 0 2 20 + 2 0 22 + 20 22 0 + ] + + @test distances ≈ ground_truth atol = 5e-3 + end + + @safetestset "compute_distances" begin + using SequentialSamplingModels + using SequentialSamplingModels: compute_distances + using SequentialSamplingModels: compute_feedback_matrix + using Random + using Test + + model = MDFT(; + n_alternatives = 3, + σ = 1.0, + α = 1.0, + τ = 0.30, + γ = 1.0, + κ = [0.1, 0.2], + ϕ1 = 0.01, + ϕ2 = 0.10, + β = 10.0 + ) + + M = [ + 1 3 + 2 2 + 0 2 + ] + distances = compute_distances(model, M) + S = compute_feedback_matrix(model, distances) + ground_truth = [ + 0.90 -0.0961 -0.0018 + -0.0961 0.90 -0.0008 + -0.0018 -0.0008 0.90 + ] + + @test S ≈ ground_truth atol = 5e-3 + end + + @safetestset "make_default_contrast" begin + using LinearAlgebra + using SequentialSamplingModels + using SequentialSamplingModels: make_default_contrast + using Random + using Test + + offdiag(A) = (A[ι] for ι in CartesianIndices(A) if ι[1] ≠ ι[2]) + + C = make_default_contrast(3) + + @test size(C) == (3, 3) + @test C[diagind(C)] ≈ fill(1, 3) + @test all(x -> x == -0.5, offdiag(C)) + end + + @safetestset "similarity effect" begin + using SequentialSamplingModels + using Distributions + using Random + using Test + include("mdft_test_functions.jl") + + Random.seed!(62541) + + parms = ( + σ = 0.1, + α = 0.50, + τ = 0.0, + γ = 1.0, + ϕ1 = 0.01, + ϕ2 = 0.1, + β = 10, + κ = [5, 5] + ) + + model = MDFT(; n_alternatives = 3, parms...) + + M = [ + 1.0 3.0 + 3.0 1.0 + 0.9 3.1 + ] + + @test test_context_effect(parms, (), M; test_func = test_similarity, n_sim = 1000) + true_probs = [0.15667, 0.54876, 0.29457] + true_mean_rts = [0.9106421, 0.5445200, 0.7405360] + choices, rts = rand(model, 10_000, M) + probs = map(c -> mean(choices .== c), 1:3) + @test probs ≈ true_probs rtol = 0.02 + mean_rts = map(c -> mean(rts[choices .== c]), 1:3) + @test mean_rts ≈ true_mean_rts rtol = 0.01 + end + + @safetestset "compromise effect" begin + using Distributions + using SequentialSamplingModels + using Random + using Test + include("mdft_test_functions.jl") + + Random.seed!(6511) + + parms = ( + σ = 0.1, + α = 1.0, + τ = 0.0, + γ = 1.0, + ϕ1 = 0.03, + ϕ2 = 1.2, + β = 10.0, + κ = [10, 10] + ) + + model = MDFT(; n_alternatives = 3, parms...) 
+
+        M = [
+            1.0 3.0
+            2.0 2.0
+            3.0 1.0
+        ]
+
+        @test test_context_effect(parms, (), M; test_func = test_compromise, n_sim = 1000)
+
+        true_probs = [0.30025, 0.40453, 0.29522]
+        true_mean_rts = [2.238162, 2.886065, 2.239521]
+
+        choices, rts = rand(model, 10_000, M)
+        probs = map(c -> mean(choices .== c), 1:3)
+        @test probs ≈ true_probs rtol = 0.02
+        mean_rts = map(c -> mean(rts[choices .== c]), 1:3)
+        @test mean_rts ≈ true_mean_rts rtol = 0.02
+    end
+
+    @safetestset "attraction effect" begin
+        using Distributions
+        using SequentialSamplingModels
+        using Random
+        using Test
+        include("mdft_test_functions.jl")
+        Random.seed!(2141)
+
+        parms = (
+            σ = 0.1,
+            α = 0.50,
+            τ = 0.0,
+            γ = 1.0,
+            ϕ1 = 0.01,
+            ϕ2 = 0.1,
+            β = 10,
+            κ = [5, 5]
+        )
+        model = MDFT(; n_alternatives = 3, parms...)
+
+        M = [
+            3.0 1.0
+            1.0 3.0
+            0.50 2.5
+        ]
+
+        @test test_context_effect(parms, (), M; test_func = test_attraction, n_sim = 1000)
+
+        true_probs = [0.55643, 0.44356, 0.00001]
+        true_mean_rts = [0.3985818, 0.5411759, 0.3875000]
+
+        choices, rts = rand(model, 10_000, M)
+        probs = map(c -> mean(choices .== c), 1:3)
+        @test probs ≈ true_probs rtol = 0.02
+        mean_rts = map(c -> mean(rts[choices .== c]), 1:3)
+        @test mean_rts[1:2] ≈ true_mean_rts[1:2] rtol = 0.02
+    end
+end
diff --git a/test/plots.jl b/test/plots.jl
index 019b1740..25b4d76f 100644
--- a/test/plots.jl
+++ b/test/plots.jl
@@ -129,6 +129,51 @@
         plot_model(dist; n_sim = 10, add_density = true, density_kwargs, xlims = (0, 1.50))
     end
 
+    @safetestset "MDFT" begin
+        using Plots
+        using SequentialSamplingModels
+        using Test
+
+        parms = (
+            σ = 0.1,
+            α = 0.50,
+            τ = 0.0,
+            γ = 1.0,
+            ϕ1 = 0.01,
+            ϕ2 = 0.1,
+            β = 10,
+            κ = [5, 5]
+        )
+
+        dist = MDFT(; n_alternatives = 3, parms...)
+
+        M = [
+            1.0 3.0
+            3.0 1.0
+            0.9 3.1
+        ]
+
+        h = histogram(dist; model_args = (M,))
+        plot!(h, dist; model_args = (M,))
+
+        histogram(dist; model_args = (M,))
+        plot!(dist; model_args = (M,))
+
+        plot(dist; model_args = (M,))
+        histogram!(dist; model_args = (M,))
+
+        p = plot(dist; model_args = (M,))
+        histogram!(p, dist; model_args = (M,))
+
+        plot_model(
+            dist;
+            n_sim = 2,
+            add_density = true,
+            model_args = (M,),
+            density_kwargs = (; t_range = range(0.1, 1, length = 200),)
+        )
+    end
+
     @safetestset "Wald" begin
         using Plots
         using SequentialSamplingModels