diff --git a/config.json b/config.json index cf8b2ffe..86763fa4 100644 --- a/config.json +++ b/config.json @@ -874,6 +874,21 @@ "variables" ] }, + { + "slug": "perceptron", + "name": "Perceptron", + "uuid": "b43a938a-7bd2-4fe4-b16c-731e2e25e747", + "practices": [], + "prerequisites": [], + "difficulty": 3, + "topics": [ + "machine learning", + "loops", + "arrays", + "logic", + "math" + ] + }, { "slug": "space-age", "name": "Space Age", @@ -889,6 +904,18 @@ "practices": [], "prerequisites": [], "difficulty": 1 + }, + { + "slug": "binary-search-tree", + "name": "Binary Search Tree", + "uuid": "f5df0c95-30c7-40f3-8ec6-fcfb3e28cdf4", + "practices": [], + "prerequisites": [], + "difficulty": 7, + "topics": [ + "structs", + "data_structures" + ] } ] }, diff --git a/exercises/practice/binary-search-tree/.docs/instructions.md b/exercises/practice/binary-search-tree/.docs/instructions.md new file mode 100644 index 00000000..c9bbba5b --- /dev/null +++ b/exercises/practice/binary-search-tree/.docs/instructions.md @@ -0,0 +1,47 @@ +# Instructions + +Insert and search for numbers in a binary tree. + +When we need to represent sorted data, an array does not make a good data structure. + +Say we have the array `[1, 3, 4, 5]`, and we add 2 to it so it becomes `[1, 3, 4, 5, 2]`. +Now we must sort the entire array again! +We can improve on this by realizing that we only need to make space for the new item `[1, nil, 3, 4, 5]`, and then adding the item in the space we added. +But this still requires us to shift many elements down by one. + +Binary Search Trees, however, can operate on sorted data much more efficiently. + +A binary search tree consists of a series of connected nodes. +Each node contains a piece of data (e.g. the number 3), a variable named `left`, and a variable named `right`. +The `left` and `right` variables point at `nil`, or other nodes. 
+Since these other nodes in turn have other nodes beneath them, we say that the left and right variables are pointing at subtrees. +All data in the left subtree is less than or equal to the current node's data, and all data in the right subtree is greater than the current node's data. + +For example, if we had a node containing the data 4, and we added the data 2, our tree would look like this: + + 4 + / + 2 + +If we then added 6, it would look like this: + + 4 + / \ + 2 6 + +If we then added 3, it would look like this + + 4 + / \ + 2 6 + \ + 3 + +And if we then added 1, 5, and 7, it would look like this + + 4 + / \ + / \ + 2 6 + / \ / \ + 1 3 5 7 diff --git a/exercises/practice/binary-search-tree/.meta/config.json b/exercises/practice/binary-search-tree/.meta/config.json new file mode 100644 index 00000000..d38e32cc --- /dev/null +++ b/exercises/practice/binary-search-tree/.meta/config.json @@ -0,0 +1,16 @@ +{ + "authors": [], + "files": { + "solution": [ + "binary-search-tree.jl" + ], + "test": [ + "runtests.jl" + ], + "example": [ + ".meta/example.jl" + ] + }, + "blurb": "Insert and search for numbers in a binary tree.", + "source": "Josh Cheek" +} diff --git a/exercises/practice/binary-search-tree/.meta/example.jl b/exercises/practice/binary-search-tree/.meta/example.jl new file mode 100644 index 00000000..936f2024 --- /dev/null +++ b/exercises/practice/binary-search-tree/.meta/example.jl @@ -0,0 +1,38 @@ +mutable struct BinarySearchTree + data + left + right + BinarySearchTree(node::T) where T<:Real = new(node, nothing, nothing) +end + +function BinarySearchTree(vec::Vector{T}) where T<:Real + tree = BinarySearchTree(popfirst!(vec)) + foreach(node -> push!(tree, node), vec) + tree +end + +function Base.in(node, tree::BinarySearchTree) + tree.data == node && return true + if node ≤ tree.data + isnothing(tree.left) ? false : in(node, tree.left) + else + isnothing(tree.right) ? 
false : in(node, tree.right) + end +end + +function Base.push!(tree::BinarySearchTree, node) + if node ≤ tree.data + isnothing(tree.left) ? (tree.left = BinarySearchTree(node)) : push!(tree.left, node) + else + isnothing(tree.right) ? (tree.right = BinarySearchTree(node)) : push!(tree.right, node) + end + tree +end + +function traverse(tree::BinarySearchTree, channel::Channel) + !isnothing(tree.left) && traverse(tree.left, channel) + put!(channel, tree.data) + !isnothing(tree.right) && traverse(tree.right, channel) +end + +Base.sort(tree::BinarySearchTree) = collect(Channel(channel -> traverse(tree, channel))) diff --git a/exercises/practice/binary-search-tree/.meta/tests.toml b/exercises/practice/binary-search-tree/.meta/tests.toml new file mode 100644 index 00000000..c7d32021 --- /dev/null +++ b/exercises/practice/binary-search-tree/.meta/tests.toml @@ -0,0 +1,40 @@ +# This is an auto-generated file. +# +# Regenerating this file via `configlet sync` will: +# - Recreate every `description` key/value pair +# - Recreate every `reimplements` key/value pair, where they exist in problem-specifications +# - Remove any `include = true` key/value pair (an omitted `include` key implies inclusion) +# - Preserve any other key/value pair +# +# As user-added comments (using the # character) will be removed when this file +# is regenerated, comments can be added via a `comment` key. 
+ +[e9c93a78-c536-4750-a336-94583d23fafa] +description = "data is retained" + +[7a95c9e8-69f6-476a-b0c4-4170cb3f7c91] +description = "insert data at proper node -> smaller number at left node" + +[22b89499-9805-4703-a159-1a6e434c1585] +description = "insert data at proper node -> same number at left node" + +[2e85fdde-77b1-41ed-b6ac-26ce6b663e34] +description = "insert data at proper node -> greater number at right node" + +[dd898658-40ab-41d0-965e-7f145bf66e0b] +description = "can create complex tree" + +[9e0c06ef-aeca-4202-b8e4-97f1ed057d56] +description = "can sort data -> can sort single number" + +[425e6d07-fceb-4681-a4f4-e46920e380bb] +description = "can sort data -> can sort if second number is smaller than first" + +[bd7532cc-6988-4259-bac8-1d50140079ab] +description = "can sort data -> can sort if second number is same as first" + +[b6d1b3a5-9d79-44fd-9013-c83ca92ddd36] +description = "can sort data -> can sort if second number is greater than first" + +[d00ec9bd-1288-4171-b968-d44d0808c1c8] +description = "can sort data -> can sort complex tree" diff --git a/exercises/practice/binary-search-tree/binary-search-tree.jl b/exercises/practice/binary-search-tree/binary-search-tree.jl new file mode 100644 index 00000000..f95b91b8 --- /dev/null +++ b/exercises/practice/binary-search-tree/binary-search-tree.jl @@ -0,0 +1,2 @@ +# Create a (Mutable) Struct BinarySearchTree, which has fields: data, left, right +# Also write a sort method, which returns a sorted array of the elements in a BinarySearchTree \ No newline at end of file diff --git a/exercises/practice/binary-search-tree/runtests.jl b/exercises/practice/binary-search-tree/runtests.jl new file mode 100644 index 00000000..b60fd3a7 --- /dev/null +++ b/exercises/practice/binary-search-tree/runtests.jl @@ -0,0 +1,85 @@ +using Test +include("binary-search-tree.jl") + +@testset "data is retained" begin + tree = BinarySearchTree([4]) + @test tree.data == 4 + @test isnothing(tree.left) + @test isnothing(tree.right) 
+end + +@testset "insert data at proper node" begin + @testset "smaller number at left node" begin + tree = BinarySearchTree([4, 2]) + @test tree.data == 4 + @test tree.left.data == 2 + @test isnothing(tree.left.left) + @test isnothing(tree.left.right) + @test isnothing(tree.right) + end + + @testset "same number at left node" begin + tree = BinarySearchTree([4, 4]) + @test tree.data == 4 + @test tree.left.data == 4 + @test isnothing(tree.left.left) + @test isnothing(tree.left.right) + @test isnothing(tree.right) + end + + @testset "greater number at right node" begin + tree = BinarySearchTree([4, 5]) + @test tree.data == 4 + @test isnothing(tree.left) + @test tree.right.data == 5 + @test isnothing(tree.right.left) + @test isnothing(tree.right.right) + end +end + +@testset "can create complex tree" begin + tree = BinarySearchTree([4, 2, 6, 1, 3, 5, 7]) + @test tree.data == 4 + @test tree.left.data == 2 + @test tree.left.left.data == 1 + @test isnothing(tree.left.left.left) + @test isnothing(tree.left.left.right) + @test tree.left.right.data == 3 + @test isnothing(tree.left.right.left) + @test isnothing(tree.left.right.right) + @test tree.right.data == 6 + @test tree.right.left.data == 5 + @test isnothing(tree.right.left.left) + @test isnothing(tree.right.left.right) + @test tree.right.right.data == 7 + @test isnothing(tree.right.right.left) + @test isnothing(tree.right.right.right) + +end + +@testset "can sort data" begin + @testset "can sort single number" begin + tree = BinarySearchTree([2]) + @test sort(tree) == [2] + end + + @testset "can sort if second number is smaller than first" begin + tree = BinarySearchTree([2, 1]) + @test sort(tree) == [1, 2] + end + + @testset "can sort if second number is same as first" begin + tree = BinarySearchTree([2, 2]) + @test sort(tree) == [2, 2] + end + + @testset "can sort if second number is greater than first" begin + tree = BinarySearchTree([2, 3]) + @test sort(tree) == [2, 3] + end + + @testset "can sort complex tree" 
begin + tree = BinarySearchTree([4, 2, 6, 1, 3, 5, 7]) + @test sort(tree) == [1, 2, 3, 4, 5, 6, 7] + end +end diff --git a/exercises/practice/perceptron/.docs/instructions.md b/exercises/practice/perceptron/.docs/instructions.md new file mode 100644 index 00000000..ca03062a --- /dev/null +++ b/exercises/practice/perceptron/.docs/instructions.md @@ -0,0 +1,27 @@ +# Instructions + +#### Updating +Checking if an object is on one side of a hyperplane or another can be done by checking the normal vector which points to the object. The value will be positive, negative or zero, so all of the objects from a class should have normal vectors with the same sign. A zero value means the object is on the hyperplane, which we don't want to allow since it's ambiguous. Checking the sign of a normal to a hyperplane might sound like it could be complicated, but it's actually quite easy. Simply plug in the coordinates for the object into the equation for the hyperplane and check the sign of the result. For example, we can look at two objects `v₁, v₂` in relation to the hyperplane `[w₀, w₁, w₂] = [1, 1, 1]`: + +`v₁ = [x₁, y₁] = [2, 2]` + +`w₀ + w₁⋅x₁ + w₂⋅y₁ = 1 + 1⋅2 + 1⋅2 = 5 > 0` + + +`v₂ = [x₂, y₂] = [-2, -2]` + +`w₀ + w₁⋅x₂ + w₂⋅y₂ = 1 + 1⋅(-2) + 1⋅(-2) = -3 < 0` + +If `v₁` and `v₂` have the labels `1` and `-1` (like we will be using), then the hyperplane `[1, 1, 1]` is a valid decision boundary for them since the signs match. + +Now that we know how to tell which side of the hyperplane an object lies on, we can look at how perceptron updates a hyperplane. If an object is on the correct side of the hyperplane, no update is performed on the weights. However, if we find an object on the wrong side, the update rule for the weights is: + +`[w₀', w₁', w₂'] = [w₀ + 1ₗ, w₁ + x⋅1ₗ, w₂ + y⋅1ₗ]` + +Where `1ₗ ∈ {1, -1}`, according to the class of the object (i.e. 
its label), `x, y` are the coordinates of the object, the `wᵢ` are the weights of the hyperplane and the `wᵢ'` are the weights of the updated hyperplane. + +This update is repeated for each object in turn, and then the whole process repeated until there are no updates made to the hyperplane. All objects passing without an update means they have been successfully separated and you can return your decision boundary! + +Notes: +- Although the perceptron algorithm is deterministic, a decision boundary depends on initialization and is not unique in general, so the tests accept any hyperplane which fully separates the objects. +- The tests here will only include linearly separable classes, so a decision boundary will always be possible (i.e. no need to worry about non-separable classes). diff --git a/exercises/practice/perceptron/.docs/introduction.md b/exercises/practice/perceptron/.docs/introduction.md new file mode 100644 index 00000000..f1b8f8b8 --- /dev/null +++ b/exercises/practice/perceptron/.docs/introduction.md @@ -0,0 +1,20 @@ +# Introduction + +### Perceptron +[Perceptron](https://en.wikipedia.org/wiki/Perceptron) is one of the oldest and bestestly named machine learning algorithms out there. Since it is also quite simple to implement, it's a favorite place to start a machine learning journey. Perceptron is what is known as a linear classifier, which means that, if we have two labeled classes of objects, for example in 2D space, it will search for a line that can be drawn to separate them. If such a line exists, Perceptron is guaranteed to find one. See Perceptron in action separating black and white dots below! + +
+ +### Details +The basic idea is fairly straightforward. As illustrated above, we cycle through the objects and check if they are on the correct side of our guess at a line. If one is not, we make a correction and continue checking the objects against the corrected line. Eventually the line is adjusted to correctly separate all the objects and we have what is called a decision boundary! + +Why is this of any use? The decision boundary found can then help us in predicting what a new, unlabeled, object would likely be classified as by seeing which side of the boundary it is on. + +#### A Brief Word on Hyperplanes +What we have been calling a line in 2D can be generalized to something called a [hyperplane](https://en.wikipedia.org/wiki/Hyperplane), which is a convenient representation, and, if you follow the classic Perceptron algorithm, you will have to pick an initial hyperplane to start with. How do you pick your starting hyperplane? It's up to you! Be creative! Or not... Actually perceptron's convergence times are sensitive to conditions such as the initial hyperplane and even the order the objects are looped through, so you might not want to go too wild. + +We will be playing in a two dimensional space, so our separating hyperplane will simply be a 1D line. You might remember the standard equation for a line as `y = ax+b`, where `a,b ∈ ℝ`, however, in order to help generalize the idea to higher dimensions, it's convenient to reformulate this equation as `w₀ + w₁x + w₂y = 0`. This is the form of the hyperplane we will be using, so your output should be `[w₀, w₁, w₂]`. In machine learning, the `{w₀, w₁, w₂}` are usually referred to as weights. + +Scaling a hyperplane by a positive value gives an equivalent hyperplane (e.g. `[w₀, w₁, w₂] ≈ [c⋅w₀, c⋅w₁, c⋅w₂]` with `c > 0`), since an infinite line scaled by a value is just the same infinite line. 
However, it should be noted that there is a difference between the normal vectors (the green arrow in illustration above) associated with hyperplanes scaled by a negative value (e.g. `[w₀, w₁, w₂]` vs `[-w₀, -w₁, -w₂]`) in that the normal to the negative hyperplane points in the opposite direction of that of the positive one. By convention, the perceptron normal points towards the class defined as positive. diff --git a/exercises/practice/perceptron/.meta/config.json b/exercises/practice/perceptron/.meta/config.json new file mode 100644 index 00000000..f9c836dd --- /dev/null +++ b/exercises/practice/perceptron/.meta/config.json @@ -0,0 +1,18 @@ +{ + "authors": [], + "contributors": [ + "cmcaine" + ], + "files": { + "solution": [ + "perceptron.jl" + ], + "test": [ + "runtests.jl" + ], + "example": [ + ".meta/example.jl" + ] + }, + "blurb": "Write your own machine learning algorithm" +} diff --git a/exercises/practice/perceptron/.meta/example.jl b/exercises/practice/perceptron/.meta/example.jl new file mode 100644 index 00000000..ec30a831 --- /dev/null +++ b/exercises/practice/perceptron/.meta/example.jl @@ -0,0 +1,33 @@ +function classify(point, hyperplane) + # Takes a single point and a hyperplane + # Classifies which normal of the hyperplane is associated with the point + # Returns 1 for positive normal, -1 for negative normal and 0 for a point on the hyperplane + sign(hyperplane' * point) +end + +function update(point, label, hyperplane) + # Takes one point, its label and a hyperplane + # Updates the hyperplane conditional on classification not matching the label + # Returns a vector, the Perceptron updated hyperplane + hyperplane + (classify(point, hyperplane) != label) * label * point +end + +function step(points, labels, hyperplane) + # Takes a vector of points, a vector of their associated labels and a hyperplane + # Iteratively updates the hyperplane for each point/label pair + # Returns a tuple: (true/false, decision boundary/hyperplane) for valid/invalid decision 
boundary, respectively + decisionboundary = hyperplane + foreach(i -> hyperplane = update(points[i], labels[i], hyperplane), eachindex(points)) + decisionboundary == hyperplane, hyperplane +end + +function perceptron(points, labels) + # Takes a vector of linearly separable points and a vector of their associated labels + # Performs steps of the Perceptron algorithm until a valid decision boundary is found + # Returns a vector, a valid decision boundary for the provided population of labeled points + hyperplane, points = [0, 0, 0], vcat.(1, points) + while true + isdecisionboundary, hyperplane = step(points, labels, hyperplane) + isdecisionboundary && return hyperplane + end +end diff --git a/exercises/practice/perceptron/.meta/tests.toml b/exercises/practice/perceptron/.meta/tests.toml new file mode 100644 index 00000000..d375c4cb --- /dev/null +++ b/exercises/practice/perceptron/.meta/tests.toml @@ -0,0 +1,20 @@ +[b8feac03-a063-44c9-8867-330cce110e6f] +description = "Boundary is a vector of three weights" + +[7220e861-e8f6-4d5f-b45a-8750e5146010] +description = "Weights are Real numbers" + +[728853d3-24de-4855-a452-6520b67dec23] +description = "Initial population" + +[ed5bf871-3923-47ca-8346-5d640f9069a0] +description = "Initial population w/ opposite labels" + +[15a9860e-f9be-46b1-86b2-989bd878c8a5] +description = "Decision boundary cannot pass through origin" + +[52ba77fc-8983-4429-91dc-e64b2f625484] +description = "Decision boundary nearly parallel with y-axis" + +[3e758bbd-5f72-447d-999f-cfa60b27bc26] +description = "Increasing Populations" diff --git a/exercises/practice/perceptron/perceptron.jl b/exercises/practice/perceptron/perceptron.jl new file mode 100644 index 00000000..bb142478 --- /dev/null +++ b/exercises/practice/perceptron/perceptron.jl @@ -0,0 +1,3 @@ +function perceptron(points, labels) + # Perceptronize! 
+end diff --git a/exercises/practice/perceptron/runtests.jl b/exercises/practice/perceptron/runtests.jl new file mode 100644 index 00000000..112dd2e5 --- /dev/null +++ b/exercises/practice/perceptron/runtests.jl @@ -0,0 +1,89 @@ +using Test, Random +include("perceptron.jl") + +decisionboundary = perceptron([[-1,-1], [1, 0], [0, 1]], [-1, 1, 1]) + +@testset "Boundary is a vector of three weights" begin + @test length(decisionboundary) == 3 +end + +@testset "Weights are Real numbers" begin + @test eltype(decisionboundary) <: Real +end + +function runtestset() + + @testset "Low populations" begin + + # Initial population + points = [[-1, 0], [0, -1], [1, 0], [0, 1]] + labels = [-1, -1, 1, 1] + decisionboundary = perceptron(points, labels) + reference = [0, 1, 1] #A valid decision boundary need not match the reference + @testset "Initial population - Your decision boundary: $decisionboundary" begin + @test isvalidboundary(points, labels, decisionboundary) + end + + #Initial population w/ opposite labels + points = [[-1, 0], [0, -1], [1, 0], [0, 1]] + labels = [1, 1, -1, -1] + decisionboundary = perceptron(points, labels) + reference = [0, -1, -1] #A valid decision boundary need not match the reference + @testset "Initial population w/ opposite labels - Your decision boundary: $decisionboundary" begin + @test isvalidboundary(points, labels, decisionboundary) + end + + # Decision boundary cannot pass through origin + points = [[1, 0], [0, 1], [2, 1], [1, 2]] + labels = [-1, -1, 1, 1] + decisionboundary = perceptron(points, labels) + reference = [-2, 1, 1] #A valid decision boundary need not match the reference + @testset "Decision boundary cannot pass through origin - Your decision boundary: $decisionboundary" begin + @test isvalidboundary(points, labels, decisionboundary) + end + + #Decision boundary nearly parallel with y-axis + points = [[0, 50], [0, -50], [1, 50], [1, -50]] + labels = [-1, -1, 1, 1] + decisionboundary = perceptron(points, labels) + reference = [-1, 
2, 0] #A valid decision boundary need not match the reference + @testset "Decision boundary nearly parallel with y-axis - Your decision boundary: $decisionboundary" begin + @test isvalidboundary(points, labels, decisionboundary) + end + + end + + @testset "Increasing Populations" begin + for n in 10:50 + points, labels = population(n, 25) + decisionboundary = perceptron(points, labels) + @testset "Population: $n points - Your decision boundary: $decisionboundary" begin + @test isvalidboundary(points, labels, decisionboundary) + end + end + end + +end + + + +function population(n, bound) + v = !iszero(n % 10) + b, x, y = rand(-bound÷2:bound÷2), rand(-bound:bound), rand(-bound:bound)v + y_intercept = -b ÷ (iszero(y) ? 1 : y) + points, labels, hyperplane = [], [], [b, x, y] + while n > 0 + point = [rand(-bound:bound), y_intercept + rand(-bound:bound)] + label = point' * [x, y] + b + if !iszero(label) + push!(points, point) + push!(labels, sign(label)) + n -= 1 + end + end + points, labels +end +isvalidboundary(points, labels, db) = all(>(0), reduce(hcat, vcat.(1, points))' * db .* labels) + +Random.seed!(42) +runtestset()