diff --git a/CMakeLists.txt b/CMakeLists.txt
index 97f21f0..e0332f3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -10,18 +10,20 @@ set(SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/src)
 
 set(HEADERS
     ${INCLUDE_DIR}/CaberNet.h
     ${INCLUDE_DIR}/CaberNet/tensor.h
+    ${INCLUDE_DIR}/CaberNet/tensor/tensor_float32.h
+    ${INCLUDE_DIR}/CaberNet/tensor/tensor_int16.h
     ${INCLUDE_DIR}/CaberNet/functions.h
     ${INCLUDE_DIR}/CaberNet/layers.h
     ${INCLUDE_DIR}/CaberNet/model.h
-    ${INCLUDE_DIR}/CaberNet/subscripts.h
     ${INCLUDE_DIR}/CaberNet/criterions.h
 )
 
 set(SOURCES
     ${SOURCE_DIR}/tensor.cpp
+    ${SOURCE_DIR}/tensor/tensor_float32.cpp
+    ${SOURCE_DIR}/tensor/tensor_int16.cpp
     ${SOURCE_DIR}/functions.cpp
     ${SOURCE_DIR}/layers.cpp
-    ${SOURCE_DIR}/subscripts.cpp
     ${SOURCE_DIR}/criterions.cpp
     ${SOURCE_DIR}/internals/operations/internal_operation_addition.cpp
     ${SOURCE_DIR}/internals/operations/internal_operation_multiplication.cpp
diff --git a/examples/functions.cpp b/examples/functions.cpp
index 81a64a1..ecfbfa2 100644
--- a/examples/functions.cpp
+++ b/examples/functions.cpp
@@ -1,16 +1,6 @@
 /*
 
-To run this code build the library:
-in your terminal:
-
-https://github.com/xEricCardozo/CaberNet-cpp.git
-
-and inside the folder:
-
-mkdir build
-cd build
-cmake ..
-make
+To run this code build the library following the instructions in the .github folder.
 
 then compile this file with:
 
@@ -24,12 +14,12 @@ g++ functions.cpp -LCaberNet/lib -lCaberNet -I CaberNet/include
 
 int main() {
 
     // You can use enums to set the gradient requirement:
-    net::Tensor x({2,3}, net::requires_gradient::False); x.fill({1,2,3,4,5,6});
-    net::Tensor w({4,3}, net::requires_gradient::True); w.fill({1,2,-3,4,5,6,7,8,-9,10,11,-12});
+    net::Tensor<float> x({2,3}, net::requires_gradient::False); x.fill({1,2,3,4,5,6});
+    net::Tensor<float> w({4,3}, net::requires_gradient::True); w.fill({1,2,-3,4,5,6,7,8,-9,10,11,-12});
 
     // Or use just a boolean. Whatever you prefer.
-    net::Tensor b({1,4}, true); b.fill({1,2,3,4});
-    net::Tensor I({2,4}, false); I.fill(1);
+    net::Tensor<float> b({1,4}, true); b.fill({1,2,3,4});
+    net::Tensor<float> I({2,4}, false); I.fill(1);
 
     x = net::function::linear(x,w,b);
     x = net::function::relu(x);
diff --git a/examples/layers.cpp b/examples/layers.cpp
index a138a0e..617b3b3 100644
--- a/examples/layers.cpp
+++ b/examples/layers.cpp
@@ -1,3 +1,14 @@
+/*
+
+To run this code build the library following the instructions in the .github folder.
+
+then compile this file with:
+
+g++ layers.cpp -LCaberNet/lib -lCaberNet -I CaberNet/include
+./a.out
+
+*/
+
 #include <CaberNet.h>
 
 struct Autoencoder : public net::Model<Autoencoder> {
@@ -17,7 +28,7 @@ struct Autoencoder : public net::Model<Autoencoder> {
         net::layer::LogSoftmax(1/*axis*/)
     };
 
-    net::Tensor forward(net::Tensor x) {
+    net::Tensor<float> forward(net::Tensor<float> x) {
         x = encoder(x);
         x = decoder(x);
         return x;
@@ -27,8 +38,8 @@ struct Autoencoder : public net::Model<Autoencoder> {
 
 int main() {
     Autoencoder model;
-    net::Tensor x({1, 784}); x.fill(net::initializer::He);
-    net::Tensor y = model(x);
+    net::Tensor<float> x({1, 784}); x.fill(net::initializer::He);
+    net::Tensor<float> y = model(x);
     y.perform();
     std::cout << y;
 }
diff --git a/examples/operations.cpp b/examples/operations.cpp
index fc6d3df..7fc6817 100644
--- a/examples/operations.cpp
+++ b/examples/operations.cpp
@@ -1,16 +1,6 @@
 /*
 
-To run this code build the library:
-in your terminal:
-
-https://github.com/xEricCardozo/CaberNet-cpp.git
-
-and inside the folder:
-
-mkdir build
-cd build
-cmake ..
-make
+To run this code build the library following the instructions in the .github folder.
 
 then compile this file with:
 
@@ -23,11 +13,11 @@ g++ operations.cpp -LCaberNet/lib -lCaberNet -I CaberNet/include
 
 #include <CaberNet.h>
 
 int main() {
-    net::Tensor x({2,3}, false); x.fill({1,2,3,4,5,6});
-    net::Tensor y({2,3}, true); y.fill({1,1,1,-1,-1,-1});
-    net::Tensor z({2,3}, true); z.fill(1);
-    net::Tensor I({2,3}, false); I.fill(1);
-    net::Tensor w({3,3}, true); w.fill({1,2,3,4,5,6,7,8,9});
+    net::Tensor<float> x({2,3}, false); x.fill({1,2,3,4,5,6});
+    net::Tensor<float> y({2,3}, true); y.fill({1,1,1,-1,-1,-1});
+    net::Tensor<float> z({2,3}, true); z.fill(1);
+    net::Tensor<float> I({2,3}, false); I.fill(1);
+    net::Tensor<float> w({3,3}, true); w.fill({1,2,3,4,5,6,7,8,9});
 
     x = x + I;
     x = net::matmul(x, w);
diff --git a/in-process/idea.cpp b/in-process/idea.cpp
new file mode 100644
index 0000000..9496d7b
--- /dev/null
+++ b/in-process/idea.cpp
@@ -0,0 +1,110 @@
+#include <iostream>
+#include <vector>
+#include <memory>
+
+namespace internal {
+
+class Base {
+    public:
+    virtual ~Base() = default;
+};
+
+template<typename T>
+class Array : Base {
+    public:
+    using scalar_type = T;
+    using pointer = scalar_type*;
+    using const_pointer = const scalar_type*;
+
+    using storage_type = std::vector<scalar_type>;
+    using iterator = typename storage_type::iterator;
+    using const_iterator = typename storage_type::const_iterator;
+
+    Array() = default;
+    Array(std::size_t size) : data_(size) {
+        for (std::size_t i = 0; i < size; ++i) data_[i] = 1;
+    }
+
+    storage_type data_;
+};
+
+class Tensor : public Array<float> {
+    public:
+    Tensor() = default;
+    Tensor(std::size_t size) : Array<float>(size) {}
+};
+
+};
+
+
+namespace net {
+
+class integer_32 {
+    public:
+    using scalar_type = int;
+    using iterator = std::vector<int>::iterator;
+    using const_iterator = std::vector<int>::const_iterator;
+
+    integer_32(std::size_t size) {
+        tensor_ = std::make_shared<internal::Array<int>>(size);
+    }
+
+    iterator begin() { return tensor_->data_.begin(); }
+    iterator end() { return tensor_->data_.end(); }
+
+    const_iterator begin() const { return tensor_->data_.cbegin(); }
+    const_iterator end() const { return tensor_->data_.cend(); }
+
+    private:
+    std::shared_ptr<internal::Array<int>> tensor_;
+};
+
+class float_32 {
+    public:
+    using scalar_type = float;
+    using iterator = std::vector<float>::iterator;
+    using const_iterator = std::vector<float>::const_iterator;
+
+    float_32(std::size_t size) {
+        tensor_ = std::make_shared<internal::Tensor>(size);
+    }
+
+    iterator begin() { return tensor_->data_.begin(); }
+    iterator end() { return tensor_->data_.end(); }
+
+    const_iterator begin() const { return tensor_->data_.cbegin(); }
+    const_iterator end() const { return tensor_->data_.cend(); }
+
+    private:
+    std::shared_ptr<internal::Tensor> tensor_;
+};
+
+template<typename T>
+class Tensor {
+    public:
+    using scalar_type = T;
+
+    private:
+    std::shared_ptr<internal::Array<T>> data_;
+};
+
+template<>
+class Tensor<float> : public float_32 {
+    public:
+    Tensor(std::size_t size) : float_32(size) {
+        std::cout << "i'm a specialization";
+    }
+};
+
+Tensor<float> fn(Tensor<float> x){
+    return x;
+}
+
+
+} // namespace net
+
+int main() {
+    net::Tensor<float> tensor(10);
+    net::Tensor<float> tensor2 = net::fn(tensor);
+    for(auto i : tensor2) std::cout << i << std::endl;
+}
\ No newline at end of file
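
Review note: in-process/idea.cpp is the prototype for the design the rest of this patch adopts. The public tensor types are thin handles that own their storage through a std::shared_ptr, so copying a tensor copies the handle and both copies alias the same buffer. A minimal standalone sketch of that behaviour, with hypothetical names (not CaberNet code):

    #include <memory>
    #include <vector>
    #include <iostream>

    // Hypothetical handle type mirroring float_32/integer_32 above:
    // copies share the underlying storage instead of cloning it.
    class Handle {
        public:
        explicit Handle(std::size_t size) : data_(std::make_shared<std::vector<float>>(size)) {}
        float& operator[](std::size_t index) { return (*data_)[index]; }
        private:
        std::shared_ptr<std::vector<float>> data_;
    };

    int main() {
        Handle a(3);
        Handle b = a;       // shallow copy: b aliases a's buffer
        b[0] = 42.0f;
        std::cout << a[0];  // prints 42: the write is visible through both handles
    }
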
diff --git a/in-process/usless_datareader.hpp b/in-process/usless_datareader.hpp
index c71463b..a0040a3 100644
--- a/in-process/usless_datareader.hpp
+++ b/in-process/usless_datareader.hpp
@@ -10,9 +10,10 @@
 #include <sstream>
 
 /*
+    #THIS NEEDS A COMPLETE REWRITE
+
 Reads a dataset from a csv file. Returns a pair of vectors, the first one containing the features
 and the second one the targets.
-This is a very simple implementation, it is not optimized for speed, but it is enough for the purpose of this project.
 It's header only, so it can be included in the main file easily.
 */
diff --git a/include/CaberNet.h b/include/CaberNet.h
index 5a3cb20..55aef05 100644
--- a/include/CaberNet.h
+++ b/include/CaberNet.h
@@ -3,5 +3,4 @@
 #include "CaberNet/functions.h"
 #include "CaberNet/layers.h"
 #include "CaberNet/model.h"
-#include "CaberNet/subscripts.h"
 #include "CaberNet/criterions.h"
\ No newline at end of file
diff --git a/include/CaberNet/criterions.h b/include/CaberNet/criterions.h
index b88c42d..a8b7e2d 100644
--- a/include/CaberNet/criterions.h
+++ b/include/CaberNet/criterions.h
@@ -4,7 +4,6 @@
 #include <memory>
 
 #include "tensor.h"
-#include "subscripts.h"
 
 namespace internal { class Criterion; }
 
@@ -15,8 +14,8 @@ namespace net::criterion {
 class NegativeLogLikelihood {
     public:
     ~NegativeLogLikelihood();
-    NegativeLogLikelihood(Tensor output, Subscripts targets);
-    Tensor::scalar_type loss() const;
+    NegativeLogLikelihood(Tensor<float> output, Tensor<int16_t> targets);
+    float loss() const;
 
     private:
     std::unique_ptr<internal::Criterion> criterion_;
diff --git a/include/CaberNet/functions.h b/include/CaberNet/functions.h
index 92730d5..dce8dd4 100644
--- a/include/CaberNet/functions.h
+++ b/include/CaberNet/functions.h
@@ -3,9 +3,9 @@
 
 namespace net::function {
 
-Tensor linear(const Tensor& input, const Tensor& weight, const Tensor& bias);
-Tensor softmax(Tensor& input, int axis);
-Tensor log_softmax(Tensor& input, int axis);
-Tensor relu(const Tensor& input);
+Tensor<float> linear(const Tensor<float>& input, const Tensor<float>& weight, const Tensor<float>& bias);
+Tensor<float> softmax(Tensor<float>& input, int axis);
+Tensor<float> log_softmax(Tensor<float>& input, int axis);
+Tensor<float> relu(const Tensor<float>& input);
 
 } // namespace net::function
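
Review note: the layer and model headers below keep the library's CRTP base: net::Model<Derived> implements operator() by static_casting itself to the derived layer and calling its forward(), so dispatch is resolved at compile time without virtual calls. A stripped-down sketch of the pattern, using toy types rather than CaberNet's:

    #include <iostream>

    template<typename Derived>
    struct Model {
        // Compile-time dispatch: the derived type is known statically,
        // so no virtual table is involved.
        int operator()(int input) { return static_cast<Derived*>(this)->forward(input); }
    };

    struct Doubler : Model<Doubler> {
        int forward(int input) { return input * 2; }
    };

    int main() {
        Doubler model;
        std::cout << model(21) << '\n'; // prints 42
    }
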
diff --git a/include/CaberNet/layers.h b/include/CaberNet/layers.h
index ddfc5f6..d4f534e 100644
--- a/include/CaberNet/layers.h
+++ b/include/CaberNet/layers.h
@@ -15,29 +15,29 @@ class Linear : public Model<Linear> {
         size_type input_features,
         size_type output_features,
         initializer distribution = initializer::He
     );
-
-    Tensor forward(Tensor x);
+
+    Tensor<float> forward(Tensor<float> x);
 
     private:
-    Tensor weight_;
-    Tensor bias_;
+    Tensor<float> weight_;
+    Tensor<float> bias_;
 };
 
 struct ReLU : public Model<ReLU> {
     ReLU() = default;
-    Tensor forward(Tensor input);
+    Tensor<float> forward(Tensor<float> input);
 };
 
 struct Softmax : public Model<Softmax> {
     int axis;
     Softmax(int axis);
-    Tensor forward(Tensor input);
+    Tensor<float> forward(Tensor<float> input);
 };
 
 struct LogSoftmax : public Model<LogSoftmax> {
     int axis;
     LogSoftmax(int axis);
-    Tensor forward(Tensor input);
+    Tensor<float> forward(Tensor<float> input);
 };
 
@@ -55,16 +55,14 @@ class Sequence : public Model<Sequence> {
     Sequence(Layers&& ... layers) {
         layers_ = { std::forward<Layers>(layers)...
         };
     }
-
-
-    Tensor forward(Tensor input) {
+
+    Tensor<float> forward(Tensor<float> input) {
         for (auto& layer : layers_) {
             input = std::visit([input](auto&& argument) { return argument.forward(input); }, layer);
         }
         return input;
     }
-
     private:
     std::vector<std::variant<Linear, ReLU, Softmax, LogSoftmax>> layers_;
 };
diff --git a/include/CaberNet/model.h b/include/CaberNet/model.h
index b9e27e8..59d6fee 100644
--- a/include/CaberNet/model.h
+++ b/include/CaberNet/model.h
@@ -12,7 +12,7 @@ class Model {
     using size_type = std::size_t;
     using shape_type = std::vector<size_type>;
 
-    Tensor operator()(Tensor input) {
+    Tensor<float> operator()(Tensor<float> input) {
         return static_cast<Derived*>(this)->forward(input);
     }
 };
diff --git a/include/CaberNet/tensor.h b/include/CaberNet/tensor.h
index d437ff6..2197b56 100644
--- a/include/CaberNet/tensor.h
+++ b/include/CaberNet/tensor.h
@@ -1,66 +1,35 @@
 #pragma once
+
 #include <iostream>
 #include <vector>
 #include <memory>
 
-#include "initializers.h"
-#include "statistics/distributions.h"
-
-namespace internal { class Tensor; }
+#include "tensor/tensor_float32.h"
+#include "tensor/tensor_int16.h"
 
 namespace net {
 
+template<typename T>
 class Tensor {
     public:
-    using scalar_type = float;
-    using value_type = scalar_type; // Needed for GMock's built-in matches
-    using pointer = scalar_type*;
-    using const_pointer = const scalar_type*;
-    using size_type = std::size_t;
-    using shape_type = std::vector<size_type>;
-    using storage_type = std::vector<scalar_type>;
-    using iterator = storage_type::iterator;
-    using const_iterator = storage_type::const_iterator;
-
-    Tensor() = default;
-    Tensor(std::shared_ptr<internal::Tensor> tensor);
-    Tensor(shape_type shape, bool gradient_requirement = false);
-    Tensor(shape_type shape, requires_gradient gradient_requirement);
-
-    void reshape(shape_type shape);
-
-    void backward(const Tensor& gradient);
-    void perform();
-
-    void fill(initializer distribution);
-    void fill(scalar_type value);
-    void fill(std::vector<scalar_type> values);
-
-    internal::Tensor* internal() const;
-    internal::Tensor* internal();
-
-    iterator begin();
-    iterator end();
-    const_iterator begin() const;
-    const_iterator end() const;
-    const_iterator cbegin() const;
-    const_iterator cend() const;
-
-    Tensor gradient() const;
-
-    pointer data();
-    const_pointer data() const;
-    shape_type shape() const;
-    size_type rank() const;
+    Tensor() { throw std::runtime_error("Bad type or not implemented type."); }
+};
 
-    friend std::ostream& operator<<(std::ostream& ostream, const Tensor& tensor);
+template<>
+struct Tensor<float> : public TensorFloat32 {
+    using TensorFloat32::TensorFloat32;
+};
 
-    private:
-    std::shared_ptr<internal::Tensor> tensor_;
+template<>
+struct Tensor<int16_t> : public TensorInt16 {
+    using TensorInt16::TensorInt16;
 };
 
-Tensor operator + (const Tensor& first, const Tensor& second);
-Tensor operator * (const Tensor& first, const Tensor& second);
-Tensor matmul(const Tensor& first, const Tensor& second);
+std::ostream& operator<<(std::ostream& ostream, const Tensor<float>& tensor);
+std::ostream& operator<<(std::ostream& ostream, const Tensor<int16_t>& tensor);
+
+Tensor<float> matmul(const Tensor<float>& first, const Tensor<float>& second);
+Tensor<float> operator + (const Tensor<float>& first, const Tensor<float>& second);
+Tensor<float> operator * (const Tensor<float>& first, const Tensor<float>& second);
 
 } // namespace net
\ No newline at end of file
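
Review note: this tensor.h rewrite is the heart of the patch. The primary net::Tensor<T> template exists only to fail loudly for unsupported element types, while explicit specializations inherit everything, constructors included, from the concrete TensorFloat32 and TensorInt16 implementations. A self-contained sketch of the dispatch pattern, with hypothetical stand-in back ends:

    #include <stdexcept>
    #include <cstdint>
    #include <cstddef>

    // Stand-ins for TensorFloat32 / TensorInt16.
    struct Float32Impl { explicit Float32Impl(std::size_t /*size*/) {} };
    struct Int16Impl   { explicit Int16Impl(std::size_t /*size*/) {} };

    template<typename T>
    struct Tensor {
        // Any element type without a specialization fails at construction.
        Tensor() { throw std::runtime_error("Bad type or not implemented type."); }
    };

    // Each supported type forwards wholesale to its implementation class.
    template<> struct Tensor<float>   : Float32Impl { using Float32Impl::Float32Impl; };
    template<> struct Tensor<int16_t> : Int16Impl   { using Int16Impl::Int16Impl; };

    int main() {
        Tensor<float>   x(6); // resolves to the float32 implementation
        Tensor<int16_t> y(3); // resolves to the int16 implementation
    }

A static_assert in the primary template would move the failure from run time to compile time, which may be worth considering.
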
diff --git a/include/CaberNet/tensor/tensor_float32.h b/include/CaberNet/tensor/tensor_float32.h
new file mode 100644
index 0000000..0895da8
--- /dev/null
+++ b/include/CaberNet/tensor/tensor_float32.h
@@ -0,0 +1,64 @@
+#pragma once
+#include <iostream>
+#include <vector>
+#include <memory>
+
+#include "../initializers.h"
+#include "../statistics/distributions.h"
+
+namespace internal { class Tensor; }
+
+namespace net {
+
+template<typename T> class Tensor;
+
+class TensorFloat32 {
+    public:
+    using value_type = float; // Needed for GMock's built-in matches
+    using pointer = value_type*;
+    using const_pointer = const value_type*;
+
+    using size_type = std::size_t;
+    using shape_type = std::vector<size_type>;
+    using storage_type = std::vector<value_type>;
+
+    using iterator = storage_type::iterator;
+    using const_iterator = storage_type::const_iterator;
+
+    TensorFloat32() = default;
+    TensorFloat32(std::shared_ptr<internal::Tensor> tensor);
+    TensorFloat32(shape_type shape, bool gradient_requirement = false);
+    TensorFloat32(shape_type shape, requires_gradient gradient_requirement);
+
+    void reshape(shape_type shape);
+
+    void backward(const Tensor<float>& gradient);
+    void perform();
+
+    void fill(initializer distribution);
+    void fill(value_type value);
+    void fill(std::vector<value_type> values);
+
+    internal::Tensor* internal() const;
+    internal::Tensor* internal();
+
+    iterator begin();
+    iterator end();
+    const_iterator begin() const;
+    const_iterator end() const;
+    const_iterator cbegin() const;
+    const_iterator cend() const;
+
+    Tensor<float> gradient() const;
+
+    pointer data();
+    const_pointer data() const;
+    shape_type shape() const;
+    size_type rank() const;
+
+    private:
+    std::shared_ptr<internal::Tensor> tensor_;
+};
+
+
+} // namespace net
diff --git a/include/CaberNet/subscripts.h b/include/CaberNet/tensor/tensor_int16.h
similarity index 55%
rename from include/CaberNet/subscripts.h
rename to include/CaberNet/tensor/tensor_int16.h
index fa7f344..cd4e323 100644
--- a/include/CaberNet/subscripts.h
+++ b/include/CaberNet/tensor/tensor_int16.h
@@ -3,30 +3,27 @@
 #include <vector>
 #include <memory>
 
-// The bad news is that I couldn't make the Tensor of integer class work, tried with std::variant and std::visit
-// but I think that it just won't be possible.
-// Te good news is that we may not need a Tensor of integers, since it will have a completly different concern
-// than the Tensor of floats, I would be a bad idea to mix types. This is not python.
-// If you came up with a better name idea than subscripts, please let me know.
-
 namespace internal { template<typename T> class Array; }
 
 namespace net {
 
-class Subscripts {
+template<typename T> class Tensor;
+
+class TensorInt16 {
     public:
-    using value_type = std::size_t;
+    using value_type = int16_t;
     using pointer = value_type*;
     using const_pointer = const value_type*;
+
     using size_type = std::size_t;
     using shape_type = std::vector<size_type>;
     using iterator = std::vector<value_type>::iterator;
     using const_iterator = std::vector<value_type>::const_iterator;
 
-    Subscripts() = default;
-    Subscripts(std::shared_ptr<internal::Array<value_type>> subscripts);
-    Subscripts(shape_type shape);
+    TensorInt16() = default;
+    TensorInt16(std::shared_ptr<internal::Array<value_type>> subscripts);
+    TensorInt16(shape_type shape);
 
     void reshape(shape_type shape);
     void fill(value_type value);
@@ -47,10 +44,9 @@
     shape_type shape() const;
     size_type rank() const;
 
-    friend std::ostream& operator<<(std::ostream& ostream, const Subscripts& subscripts);
-
     private:
-    std::shared_ptr<internal::Array<value_type>> subscripts_;
+    std::shared_ptr<internal::Array<value_type>> data_;
+
 };
 
 } // namespace net
\ No newline at end of file
diff --git a/src/criterions.cpp b/src/criterions.cpp
index 5178239..5e97cd5 100644
--- a/src/criterions.cpp
+++ b/src/criterions.cpp
@@ -1,5 +1,4 @@
 #include "../include/CaberNet/tensor.h"
-#include "../include/CaberNet/subscripts.h"
 #include "../include/CaberNet/criterions.h"
 
 #include "internals/internal_tensor.hpp"
@@ -9,11 +8,11 @@ namespace net::criterion {
 
 NegativeLogLikelihood::~NegativeLogLikelihood() = default;
 
-NegativeLogLikelihood::NegativeLogLikelihood(Tensor output, Subscripts targets) {
+NegativeLogLikelihood::NegativeLogLikelihood(Tensor<float> output, Tensor<int16_t> targets) {
     criterion_ = std::make_unique<internal::NLLLoss>(output.internal(), targets.internal());
 }
 
-Tensor::scalar_type NegativeLogLikelihood::loss() const {
+float NegativeLogLikelihood::loss() const {
     return criterion_->loss();
 }
diff --git a/src/functions.cpp b/src/functions.cpp
index 8b57d51..0badbde 100644
--- a/src/functions.cpp
+++ b/src/functions.cpp
@@ -8,20 +8,20 @@
 
 namespace net::function {
 
-Tensor linear(const Tensor& input, const Tensor& weight, const Tensor& bias) {
-    return Tensor(std::make_shared<internal::Linear>( input.internal(), weight.internal(), bias.internal() ));
+Tensor<float> linear(const Tensor<float>& input, const Tensor<float>& weight, const Tensor<float>& bias) {
+    return Tensor<float>(std::make_shared<internal::Linear>( input.internal(), weight.internal(), bias.internal() ));
 }
 
-Tensor softmax(Tensor& input, int axis) {
-    return Tensor(std::make_shared<internal::Softmax>( input.internal(), axis ));
+Tensor<float> softmax(Tensor<float>& input, int axis) {
+    return Tensor<float>(std::make_shared<internal::Softmax>( input.internal(), axis ));
 }
 
-Tensor log_softmax(Tensor& input, int axis) {
-    return Tensor(std::make_shared<internal::LogSoftmax>( input.internal(), axis ));
+Tensor<float> log_softmax(Tensor<float>& input, int axis) {
+    return Tensor<float>(std::make_shared<internal::LogSoftmax>( input.internal(), axis ));
 }
 
-Tensor relu(const Tensor& input) {
-    return Tensor(std::make_shared<internal::ReLU>( input.internal() ));
+Tensor<float> relu(const Tensor<float>& input) {
+    return Tensor<float>(std::make_shared<internal::ReLU>( input.internal() ));
 }
 
 } // namespace net::function
\ No newline at end of file
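
Review note: with Subscripts gone, class indices travel as a Tensor<int16_t> and loss() returns a plain float. Roughly how the new criterion is driven, a sketch assuming the library is built and on the include path (mirrors tests/criterions.cpp further down):

    #include "CaberNet.h"
    #include <iostream>

    int main() {
        // Two samples, three classes each.
        net::Tensor<float> X({2, 3}, false); X.fill({0.1, 0.2, 0.7, 0.3, 0.3, 0.4});
        net::Tensor<int16_t> y({2, 1}); y.fill({2, 0}); // one target class index per row

        X = net::function::log_softmax(X, 1); // NLL expects log-probabilities
        net::criterion::NegativeLogLikelihood criterion(X, y);
        std::cout << criterion.loss() << '\n';
    }
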
diff --git a/src/internals/criterions/internal_criterions.hpp b/src/internals/criterions/internal_criterions.hpp
index 149fb7b..2b29742 100644
--- a/src/internals/criterions/internal_criterions.hpp
+++ b/src/internals/criterions/internal_criterions.hpp
@@ -6,13 +6,15 @@
 
 namespace internal {
 
+// TODO : manage the int type.
+
 class Criterion {
     public:
     using size_type = Tensor::size_type;
     using shape_type = Tensor::shape_type;
     using scalar_type = Tensor::scalar_type;
 
-    Criterion(Tensor* output, Array<size_type>* targets) {
+    Criterion(Tensor* output, Array<int16_t>* targets) {
         output_ = output;
         targets_ = targets;
     }
 
@@ -21,20 +23,20 @@ class Criterion {
     virtual scalar_type loss() const = 0;
 
     Tensor* output() const { return output_; }
-    Array<size_type>* targets() const { return targets_; }
+    Array<int16_t>* targets() const { return targets_; }
 
     size_type number_of_classes() const { return output()->size() / batch_size(); }
     size_type batch_size() const { return output()->shape().front(); }
 
     private:
     Tensor* output_;
-    Array<size_type>* targets_;
+    Array<int16_t>* targets_;
 };
 
 class NLLLoss : public Criterion {
     public:
     ~NLLLoss() final = default;
-    NLLLoss(Tensor* output, Array<size_type>* targets) : Criterion(output, targets) {}
+    NLLLoss(Tensor* output, Array<int16_t>* targets) : Criterion(output, targets) {}
     scalar_type loss() const final;
 };
diff --git a/src/internals/internal_array.hpp b/src/internals/internal_array.hpp
index b90acd6..353d690 100644
--- a/src/internals/internal_array.hpp
+++ b/src/internals/internal_array.hpp
@@ -12,31 +12,31 @@ dimension of the array.
 #include <vector>
 #include <cstddef>
 
+#include "internal_base.hpp"
+
 namespace internal {
 
 template<typename T>
-class Array {
+class Array : public Base {
     public:
     using scalar_type = T;
     using pointer = scalar_type*;
     using const_pointer = const scalar_type*;
 
-    using size_type = std::size_t;
-    using shape_type = std::vector<size_type>;
-    using storage_type = std::vector<scalar_type>;
-
+    using storage_type = std::vector<scalar_type>;
    using iterator = typename storage_type::iterator;
     using const_iterator = typename storage_type::const_iterator;
 
-    virtual ~Array() = default;
-
     Array() = default;
-    Array(const Array* other) { copy(other); }
-    Array(shape_type shape) { reshape(shape); }
 
-    size_type size() const { return size_; }
-    shape_type shape() const { return shape_; }
-    size_type rank() const { return shape_.size(); }
+    Array(const Array* other) : Base(other->shape()) {
+        storage_ = other->storage_;
+    }
+
+    Array(shape_type shape) : Base(shape) {
+        storage_.resize(size());
+    }
+
     pointer data() { return storage_.data(); }
     const_pointer data() const { return storage_.data(); }
 
@@ -48,35 +48,28 @@
     const_iterator cend() const { return storage_.cend(); }
 
     void copy(const Array* other) {
-        size_ = other->size_;
-        shape_ = other->shape_;
+        reshape(other->shape());
         storage_ = other->storage_;
     };
 
     void move(Array* other) {
-        size_ = other->size_;
-        shape_ = std::move(other->shape_);
+        reshape(other->shape());
+        other->collapse();
         storage_ = std::move(other->storage_);
-        other->size_ = 0;
-        other->shape_.clear();
         other->storage_.clear();
     };
 
     void reshape(const shape_type& shape) {
-        shape_ = shape;
-        size_ = 1;
-        for (size_type dimension : shape) size_ *= dimension;
-        storage_.resize(size_);
+        Base::reshape(shape);
+        storage_.resize(size());
     }
 
     void clear() {
-        size_ = 0;
-        shape_.clear();
         storage_.clear();
+        collapse();
     }
 
     private:
-    size_type size_;
-    shape_type shape_;
     storage_type storage_;
 };
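
Review note: Array<T> no longer carries its own size_/shape_; that bookkeeping moves into the non-template Base class introduced below, where size() is the running product of the dimensions, melt() flattens the shape to a single dimension, and collapse() empties it (used when an array is moved from). The size rule in isolation:

    #include <vector>
    #include <cstddef>
    #include <iostream>

    // Size bookkeeping as in internal::Base: the element count is the
    // product of all dimensions, so a {2,3,4} shape holds 24 elements.
    std::size_t size_of(const std::vector<std::size_t>& shape) {
        std::size_t size = 1;
        for (std::size_t dimension : shape) size *= dimension;
        return size;
    }

    int main() {
        std::cout << size_of({2, 3, 4}) << '\n'; // prints 24
    }

One caveat: Base() = default leaves size_ uninitialized, so a default-constructed array would read an indeterminate size(); initializing size_ to 0 would be safer.
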
*= dimension; + } + + size_type size() const { return size_; } + shape_type shape() const { return shape_; } + size_type rank() const { return shape_.size(); } + + void reshape(shape_type shape) { + shape_ = shape; + size_ = 1; + for (auto& dimension : shape_) size_ *= dimension; + } + + void melt() { + shape_.clear(); + shape_.push_back(size_); + } + + void collapse() { + size_ = 0; + shape_.clear(); + } + + private: + size_type size_; + shape_type shape_; + // shape_type strides_; +}; + +} + +#endif \ No newline at end of file diff --git a/src/internals/internal_tensor.hpp b/src/internals/internal_tensor.hpp index 4c254ca..5b23bca 100644 --- a/src/internals/internal_tensor.hpp +++ b/src/internals/internal_tensor.hpp @@ -47,8 +47,8 @@ class Tensor : public Array { if (is_leaf_ && requires_gradient_) gradient_ = new Tensor(shape, false, false); } + virtual ~Tensor() { if (is_leaf_ && requires_gradient_) delete gradient_; } Tensor(const Tensor* other) { copy(other); } - ~Tensor() override { if (is_leaf_ && requires_gradient_) delete gradient_; } Tensor(const Tensor& other) = delete; Tensor(Tensor&& other) = delete; Tensor& operator=(const Tensor& other) = delete; diff --git a/src/layers.cpp b/src/layers.cpp index 02e1485..b0d97b5 100644 --- a/src/layers.cpp +++ b/src/layers.cpp @@ -19,20 +19,21 @@ LogSoftmax::LogSoftmax(int axis) : axis(axis) {} /// forward methods -Tensor Linear::forward(Tensor input) { - return Tensor(std::make_shared(input.internal(), weight_.internal(), bias_.internal())); +Tensor Linear::forward(Tensor input) { + return Tensor(std::make_shared(input.internal(), weight_.internal(), bias_.internal())); + } -Tensor ReLU::forward(Tensor input) { - return Tensor(std::make_shared(input.internal())); +Tensor ReLU::forward(Tensor input) { + return Tensor(std::make_shared(input.internal())); } -Tensor Softmax::forward(Tensor input) { - return Tensor(std::make_shared(input.internal(), axis)); +Tensor Softmax::forward(Tensor input) { + return Tensor(std::make_shared(input.internal(), axis)); } -Tensor LogSoftmax::forward(Tensor input) { - return Tensor(std::make_shared(input.internal(), axis)); +Tensor LogSoftmax::forward(Tensor input) { + return Tensor(std::make_shared(input.internal(), axis)); } } // namespace net::layer \ No newline at end of file diff --git a/src/subscripts.cpp b/src/subscripts.cpp deleted file mode 100644 index c0f5e7b..0000000 --- a/src/subscripts.cpp +++ /dev/null @@ -1,49 +0,0 @@ -#include "../include/CaberNet/subscripts.h" - -#include "internals/internal_array.hpp" - -namespace net { - -Subscripts::Subscripts(std::shared_ptr> subscripts) { - subscripts_ = subscripts; -} - -Subscripts::Subscripts(shape_type shape) { - subscripts_ = std::make_shared>(shape); -} - -void Subscripts::reshape(shape_type shape) { - subscripts_->reshape(shape); -} - -void Subscripts::fill(value_type value) { - std::fill(subscripts_->begin(), subscripts_->end(), value); -} - -void Subscripts::fill(std::vector values) { - std::move(values.begin(), values.end(), subscripts_->begin()); -} - -internal::Array* Subscripts::internal() const { return subscripts_.get(); } -internal::Array* Subscripts::internal() { return subscripts_.get(); } - -Subscripts::iterator Subscripts::begin() { return subscripts_->begin(); } -Subscripts::iterator Subscripts::end() { return subscripts_->end(); } -Subscripts::const_iterator Subscripts::begin() const { return subscripts_->cbegin(); } -Subscripts::const_iterator Subscripts::end() const { return subscripts_->cend(); } -Subscripts::const_iterator 
diff --git a/src/layers.cpp b/src/layers.cpp
index 02e1485..b0d97b5 100644
--- a/src/layers.cpp
+++ b/src/layers.cpp
@@ -19,20 +19,21 @@ LogSoftmax::LogSoftmax(int axis) : axis(axis) {}
 
 /// forward methods
 
-Tensor Linear::forward(Tensor input) {
-    return Tensor(std::make_shared<internal::Linear>(input.internal(), weight_.internal(), bias_.internal()));
+Tensor<float> Linear::forward(Tensor<float> input) {
+    return Tensor<float>(std::make_shared<internal::Linear>(input.internal(), weight_.internal(), bias_.internal()));
+
 }
 
-Tensor ReLU::forward(Tensor input) {
-    return Tensor(std::make_shared<internal::ReLU>(input.internal()));
+Tensor<float> ReLU::forward(Tensor<float> input) {
+    return Tensor<float>(std::make_shared<internal::ReLU>(input.internal()));
 }
 
-Tensor Softmax::forward(Tensor input) {
-    return Tensor(std::make_shared<internal::Softmax>(input.internal(), axis));
+Tensor<float> Softmax::forward(Tensor<float> input) {
+    return Tensor<float>(std::make_shared<internal::Softmax>(input.internal(), axis));
 }
 
-Tensor LogSoftmax::forward(Tensor input) {
-    return Tensor(std::make_shared<internal::LogSoftmax>(input.internal(), axis));
+Tensor<float> LogSoftmax::forward(Tensor<float> input) {
+    return Tensor<float>(std::make_shared<internal::LogSoftmax>(input.internal(), axis));
 }
 
 } // namespace net::layer
\ No newline at end of file
diff --git a/src/subscripts.cpp b/src/subscripts.cpp
deleted file mode 100644
index c0f5e7b..0000000
--- a/src/subscripts.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-#include "../include/CaberNet/subscripts.h"
-
-#include "internals/internal_array.hpp"
-
-namespace net {
-
-Subscripts::Subscripts(std::shared_ptr<internal::Array<value_type>> subscripts) {
-    subscripts_ = subscripts;
-}
-
-Subscripts::Subscripts(shape_type shape) {
-    subscripts_ = std::make_shared<internal::Array<value_type>>(shape);
-}
-
-void Subscripts::reshape(shape_type shape) {
-    subscripts_->reshape(shape);
-}
-
-void Subscripts::fill(value_type value) {
-    std::fill(subscripts_->begin(), subscripts_->end(), value);
-}
-
-void Subscripts::fill(std::vector<value_type> values) {
-    std::move(values.begin(), values.end(), subscripts_->begin());
-}
-
-internal::Array<Subscripts::value_type>* Subscripts::internal() const { return subscripts_.get(); }
-internal::Array<Subscripts::value_type>* Subscripts::internal() { return subscripts_.get(); }
-
-Subscripts::iterator Subscripts::begin() { return subscripts_->begin(); }
-Subscripts::iterator Subscripts::end() { return subscripts_->end(); }
-Subscripts::const_iterator Subscripts::begin() const { return subscripts_->cbegin(); }
-Subscripts::const_iterator Subscripts::end() const { return subscripts_->cend(); }
-Subscripts::const_iterator Subscripts::cbegin() const { return subscripts_->cbegin(); }
-Subscripts::const_iterator Subscripts::cend() const { return subscripts_->cend(); }
-
-Subscripts::pointer Subscripts::data() { return subscripts_->data(); }
-Subscripts::const_pointer Subscripts::data() const { return subscripts_->data(); }
-Subscripts::shape_type Subscripts::shape() const { return subscripts_->shape(); }
-Subscripts::size_type Subscripts::rank() const { return subscripts_->rank(); }
-
-std::ostream& operator<<(std::ostream& ostream, const Subscripts& subscript) {
-    ostream << "[";
-    for (auto element : subscript) ostream << element << ", ";
-    ostream << "]";
-    return ostream;
-}
-
-} // namespace net
\ No newline at end of file
diff --git a/src/tensor.cpp b/src/tensor.cpp
index 1c41434..d11dd99 100644
--- a/src/tensor.cpp
+++ b/src/tensor.cpp
@@ -6,94 +6,31 @@
 
 namespace net {
 
-Tensor::Tensor(std::shared_ptr<internal::Tensor> tensor) {
-    tensor_ = tensor;
-    internal::Graph::add(tensor_);
-}
-
-Tensor::Tensor(shape_type shape, bool gradient_requirement ) {
-    tensor_ = std::make_shared<internal::Tensor>(shape);
-    tensor_-> requires_gradient(gradient_requirement);
-    internal::Graph::add(tensor_);
-}
-
-Tensor::Tensor(shape_type shape, requires_gradient gradient_requirement ) {
-    tensor_ = std::make_shared<internal::Tensor>(shape);
-    tensor_-> requires_gradient(static_cast<bool>(gradient_requirement));
-    internal::Graph::add(tensor_);
-}
-
-void Tensor::reshape(shape_type shape) {
-    if(tensor_ == nullptr) tensor_ = std::make_shared<internal::Tensor>(shape, false, false);
-    tensor_-> reshape(shape);
-}
-
-Tensor Tensor::gradient() const {
-    Tensor gradient = std::make_shared<internal::Tensor>(shape(), false);
-    std::copy(tensor_->gradient()->begin(), tensor_->gradient()->end(), gradient.begin());
-    return gradient;
-}
-
-internal::Tensor* Tensor::internal() const {return tensor_.get(); }
-internal::Tensor* Tensor::internal() { return tensor_.get(); }
-
-void Tensor::backward(const Tensor& gradient) { tensor_-> backward(gradient.internal()); }
-void Tensor::perform() { tensor_-> forward(); }
-
-Tensor::iterator Tensor::begin() { return tensor_->begin(); }
-Tensor::iterator Tensor::end() { return tensor_->end(); }
-Tensor::const_iterator Tensor::begin() const { return tensor_->begin(); }
-Tensor::const_iterator Tensor::end() const { return tensor_->end(); }
-Tensor::const_iterator Tensor::cbegin() const { return tensor_->cbegin(); }
-Tensor::const_iterator Tensor::cend() const { return tensor_->cend(); }
-
-Tensor::pointer Tensor::data() { return tensor_->data(); }
-Tensor::const_pointer Tensor::data() const { return tensor_->data(); }
-Tensor::shape_type Tensor::shape() const { return tensor_->shape(); }
-Tensor::size_type Tensor::rank() const { return tensor_->rank(); }
-
-Tensor operator + (const Tensor& first, const Tensor& second) {
-    return Tensor(std::make_shared<internal::Addition>( first.internal(), second.internal() ));
-}
-
-Tensor operator * (const Tensor& first, const Tensor& second) {
-    return Tensor(std::make_shared<internal::Multiplication>( first.internal(), second.internal() ));
-}
-Tensor matmul(const Tensor& first, const Tensor& second) {
-    return Tensor(std::make_shared<internal::Matmul>( first.internal(), second.internal() ));
-}
-
-void Tensor::fill(initializer distribution) {
-    distribution::Distribution* filler = nullptr;
-    switch (distribution) {
-        case initializer::He :
-            filler = new distribution::Normal(0, std::sqrt(2.0 / shape().back()));
-            for (auto& element : *this) element = filler->generate();
-            break;
-
-        default :
-            throw std::runtime_error("Invalid initializer");
-            break;
-    }
-
-    delete filler;
-}
-
-void Tensor::fill(scalar_type value) {
-    std::fill(tensor_->begin(), tensor_->end(), value);
-}
-
-void Tensor::fill(std::vector<scalar_type> values) {
-    std::move(values.begin(), values.end(), tensor_->begin());
-}
+std::ostream& operator<<(std::ostream& ostream, const Tensor<float>& tensor) {
+    ostream << "[";
+    for (auto element : tensor) ostream << element << ", ";
+    ostream << "]";
+    return ostream;
+}
 
-std::ostream& operator<<(std::ostream& ostream, const Tensor& tensor) {
+std::ostream& operator<<(std::ostream& ostream, const Tensor<int16_t>& tensor) {
     ostream << "[";
     for (auto element : tensor) ostream << element << ", ";
     ostream << "]";
     return ostream;
+}
+
+Tensor<float> operator + (const Tensor<float>& first, const Tensor<float>& second) {
+    return Tensor<float>(std::make_shared<internal::Addition>( first.internal(), second.internal() ));
+}
+
+Tensor<float> operator * (const Tensor<float>& first, const Tensor<float>& second) {
+    return Tensor<float>(std::make_shared<internal::Multiplication>( first.internal(), second.internal() ));
 }
 
-} // namespace net
+Tensor<float> matmul(const Tensor<float>& first, const Tensor<float>& second) {
+    return Tensor<float>(std::make_shared<internal::Matmul>( first.internal(), second.internal() ));
+}
 
+} // namespace net
\ No newline at end of file
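
Review note: the deleted bodies above move essentially unchanged into src/tensor/tensor_float32.cpp below; only the stream and float arithmetic operators stay in tensor.cpp. One detail worth flagging for reviewers: the He initializer draws from a normal distribution with mean 0 and standard deviation sqrt(2 / fan_in), where fan_in is the trailing dimension of the shape. A standalone sketch of that rule using only the standard library:

    #include <random>
    #include <cmath>
    #include <vector>
    #include <iostream>

    // He (Kaiming) normal initialization: stddev = sqrt(2 / fan_in).
    std::vector<float> he_normal(std::size_t count, std::size_t fan_in) {
        std::mt19937 generator{std::random_device{}()};
        std::normal_distribution<float> normal(0.0f, std::sqrt(2.0f / fan_in));
        std::vector<float> values(count);
        for (auto& value : values) value = normal(generator);
        return values;
    }

    int main() {
        for (float value : he_normal(5, 784)) std::cout << value << ' ';
    }
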
diff --git a/src/tensor/tensor_float32.cpp b/src/tensor/tensor_float32.cpp
new file mode 100644
index 0000000..9a2fdd9
--- /dev/null
+++ b/src/tensor/tensor_float32.cpp
@@ -0,0 +1,95 @@
+#include "../../include/CaberNet/tensor/tensor_float32.h"
+#include "../../include/CaberNet/tensor.h"
+
+#include "../internals/internal_tensor.hpp"
+#include "../internals/internal_graph.hpp"
+#include "../internals/operations/internal_operations.hpp"
+
+namespace net {
+
+TensorFloat32::TensorFloat32(std::shared_ptr<internal::Tensor> tensor) {
+    tensor_ = tensor;
+    internal::Graph::add(tensor_);
+}
+
+TensorFloat32::TensorFloat32(shape_type shape, bool gradient_requirement ) {
+    tensor_ = std::make_shared<internal::Tensor>(shape);
+    tensor_-> requires_gradient(gradient_requirement);
+    internal::Graph::add(tensor_);
+}
+
+TensorFloat32::TensorFloat32(shape_type shape, requires_gradient gradient_requirement ) {
+    tensor_ = std::make_shared<internal::Tensor>(shape);
+    tensor_-> requires_gradient(static_cast<bool>(gradient_requirement));
+    internal::Graph::add(tensor_);
+}
+
+void TensorFloat32::reshape(shape_type shape) {
+    if(tensor_ == nullptr) tensor_ = std::make_shared<internal::Tensor>(shape, false, false);
+    tensor_-> reshape(shape);
+}
+
+Tensor<float> TensorFloat32::gradient() const {
+    Tensor<float> gradient = std::make_shared<internal::Tensor>(shape(), false);
+    std::copy(tensor_->gradient()->begin(), tensor_->gradient()->end(), gradient.begin());
+    return gradient;
+}
+
+internal::Tensor* TensorFloat32::internal() const {return tensor_.get(); }
+internal::Tensor* TensorFloat32::internal() { return tensor_.get(); }
+
+void TensorFloat32::backward(const Tensor<float>& gradient) { tensor_-> backward(gradient.internal()); }
+void TensorFloat32::perform() { tensor_-> forward(); } // TODO : this should have a return type.
+
+TensorFloat32::iterator TensorFloat32::begin() { return tensor_->begin(); }
+TensorFloat32::iterator TensorFloat32::end() { return tensor_->end(); }
+TensorFloat32::const_iterator TensorFloat32::begin() const { return tensor_->begin(); }
+TensorFloat32::const_iterator TensorFloat32::end() const { return tensor_->end(); }
+TensorFloat32::const_iterator TensorFloat32::cbegin() const { return tensor_->cbegin(); }
+TensorFloat32::const_iterator TensorFloat32::cend() const { return tensor_->cend(); }
+
+TensorFloat32::pointer TensorFloat32::data() { return tensor_->data(); }
+TensorFloat32::const_pointer TensorFloat32::data() const { return tensor_->data(); }
+TensorFloat32::shape_type TensorFloat32::shape() const { return tensor_->shape(); }
+TensorFloat32::size_type TensorFloat32::rank() const { return tensor_->rank(); }
+
+/*
+Tensor<float> operator + (const Tensor<float>& first, const Tensor<float>& second) {
+    return Tensor<float>(std::make_shared<internal::Addition>( first.internal(), second.internal() ));
+}
+
+Tensor<float> operator * (const Tensor<float>& first, const Tensor<float>& second) {
+    return Tensor<float>(std::make_shared<internal::Multiplication>( first.internal(), second.internal() ));
+}
+
+Tensor<float> matmul(const Tensor<float>& first, const Tensor<float>& second) {
+    return Tensor<float>(std::make_shared<internal::Matmul>( first.internal(), second.internal() ));
+}
+*/
+
+void TensorFloat32::fill(initializer distribution) {
+    distribution::Distribution* filler = nullptr;
+    switch (distribution) {
+        case initializer::He :
+            filler = new distribution::Normal(0, std::sqrt(2.0 / shape().back()));
+            for (auto& element : *this) element = filler->generate();
+            break;
+
+        default :
+            throw std::runtime_error("Invalid initializer");
+            break;
+    }
+
+    delete filler;
+}
+
+void TensorFloat32::fill(value_type value) {
+    std::fill(tensor_->begin(), tensor_->end(), value);
+}
+
+void TensorFloat32::fill(std::vector<value_type> values) {
+    std::move(values.begin(), values.end(), tensor_->begin());
+}
+
+} // namespace net
+
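
Review note: end to end, the float tensor keeps the usual autograd flow: build leaves, compose an expression, perform() to evaluate, backward() with a seed gradient, then read gradient() from the leaves that asked for it. A sketch assuming the library is built and linked (mirrors tests/functions.cpp below):

    #include "CaberNet.h"
    #include <iostream>

    int main() {
        net::Tensor<float> x({2, 2}, false); x.fill({1, 2, 3, 4});
        net::Tensor<float> w({2, 2}, true);  w.fill({1, 0, 0, 1});
        net::Tensor<float> seed({2, 2}, false); seed.fill(1);

        net::Tensor<float> y = net::matmul(x, w);
        y.perform();        // evaluate the expression graph
        y.backward(seed);   // accumulate gradients into the leaves

        std::cout << w.gradient(); // gradient of the requires-gradient leaf
    }
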
diff --git a/src/tensor/tensor_int16.cpp b/src/tensor/tensor_int16.cpp
new file mode 100644
index 0000000..8efd489
--- /dev/null
+++ b/src/tensor/tensor_int16.cpp
@@ -0,0 +1,43 @@
+#include "../../include/CaberNet/tensor/tensor_int16.h"
+#include "../../include/CaberNet/tensor.h"
+
+#include "../internals/internal_array.hpp"
+
+namespace net {
+
+TensorInt16::TensorInt16(std::shared_ptr<internal::Array<value_type>> subscripts) {
+    data_ = subscripts;
+}
+
+TensorInt16::TensorInt16(shape_type shape) {
+    data_ = std::make_shared<internal::Array<value_type>>(shape);
+}
+
+void TensorInt16::reshape(shape_type shape) {
+    data_->reshape(shape);
+}
+
+void TensorInt16::fill(value_type value) {
+    std::fill(data_->begin(), data_->end(), value);
+}
+
+void TensorInt16::fill(std::vector<value_type> values) {
+    std::move(values.begin(), values.end(), data_->begin());
+}
+
+internal::Array<TensorInt16::value_type>* TensorInt16::internal() const { return data_.get(); }
+internal::Array<TensorInt16::value_type>* TensorInt16::internal() { return data_.get(); }
+
+TensorInt16::iterator TensorInt16::begin() { return data_->begin(); }
+TensorInt16::iterator TensorInt16::end() { return data_->end(); }
+TensorInt16::const_iterator TensorInt16::begin() const { return data_->cbegin(); }
+TensorInt16::const_iterator TensorInt16::end() const { return data_->cend(); }
+TensorInt16::const_iterator TensorInt16::cbegin() const { return data_->cbegin(); }
+TensorInt16::const_iterator TensorInt16::cend() const { return data_->cend(); }
+
+TensorInt16::pointer TensorInt16::data() { return data_->data(); }
+TensorInt16::const_pointer TensorInt16::data() const { return data_->data(); }
+TensorInt16::shape_type TensorInt16::shape() const { return data_->shape(); }
+TensorInt16::size_type TensorInt16::rank() const { return data_->rank(); }
+
+} // namespace net
\ No newline at end of file
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 3fdfcfe..58462ef 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -1,7 +1,7 @@
 find_package(GTest REQUIRED CONFIG)
 include(GoogleTest)
 
-add_executable(${PROJECT_NAME}Tests criterions.cpp functions.cpp operations.cpp subscripts.cpp)
+add_executable(${PROJECT_NAME}Tests criterions.cpp functions.cpp operations.cpp)
 target_link_libraries(${PROJECT_NAME}Tests PRIVATE ${PROJECT_NAME} GTest::gtest GTest::gtest_main GTest::gmock)
 target_compile_features(${PROJECT_NAME}Tests PRIVATE cxx_std_20)
 gtest_add_tests(TARGET ${PROJECT_NAME}Tests)
diff --git a/tests/criterions.cpp b/tests/criterions.cpp
index b6814ab..3191627 100644
--- a/tests/criterions.cpp
+++ b/tests/criterions.cpp
@@ -20,7 +20,7 @@ TEST(criterion, loss) {
 
     */
 
-    net::Tensor X({3, 5}, false); X.fill(
+    net::Tensor<float> X({3, 5}, false); X.fill(
         {
             -1.0, 2.0, -0.5, 1.0, 3.0,
             0.5, 1.0, 2.0, -1.0, -2.0,
@@ -28,7 +28,7 @@ TEST(criterion, loss) {
         }
     );
 
-    net::Subscripts y({3,1}); y.fill({1, 3, 0});
+    net::Tensor<int16_t> y({3,1}); y.fill({1, 3, 0});
 
     X = net::function::log_softmax(X,1);
     net::criterion::NegativeLogLikelihood criterion(X, y);
@@ -36,5 +36,6 @@ TEST(criterion, loss) {
 
     /* The result should be: 1.82998 */
 
+    EXPECT_FLOAT_EQ(1.8298835f, criterion.loss());
 }
diff --git a/tests/functions.cpp b/tests/functions.cpp
index 56f0b2f..6d7a8d6 100644
--- a/tests/functions.cpp
+++ b/tests/functions.cpp
@@ -25,10 +25,10 @@ TEST(functions, gradient) {
     print(b.grad)
     */
 
-    net::Tensor x({2,3}, false); x.fill({1,2,3,4,5,6});
-    net::Tensor w({4,3}, true); w.fill({1,2,-3,4,5,6,7,8,-9,10,11,-12});
-    net::Tensor b({1,4}, true); b.fill({1,2,3,4});
-    net::Tensor I({2,4}, false); I.fill(1);
+    net::Tensor<float> x({2,3}, false); x.fill({1,2,3,4,5,6});
+    net::Tensor<float> w({4,3}, true); w.fill({1,2,-3,4,5,6,7,8,-9,10,11,-12});
+    net::Tensor<float> b({1,4}, true); b.fill({1,2,3,4});
+    net::Tensor<float> I({2,4}, false); I.fill(1);
 
     x = net::function::linear(x,w,b);
     x = net::function::relu(x);
diff --git a/tests/operations.cpp b/tests/operations.cpp
index 9fe19b2..46a1780 100644
--- a/tests/operations.cpp
+++ b/tests/operations.cpp
@@ -31,11 +31,11 @@ TEST(operations, matmul) {
 
     */
 
-    net::Tensor x({2,3}, false); x.fill({1,2,3,4,5,6});
-    net::Tensor y({2,3}, true); y.fill({1,1,1,-1,-1,-1});
-    net::Tensor z({2,3}, true); z.fill(1);
-    net::Tensor I({2,3}, false); I.fill(1);
-    net::Tensor w({3,3}, true); w.fill({1,2,3,4,5,6,7,8,9});
+    net::Tensor<float> x({2,3}, false); x.fill({1,2,3,4,5,6});
+    net::Tensor<float> y({2,3}, true); y.fill({1,1,1,-1,-1,-1});
+    net::Tensor<float> z({2,3}, true); z.fill(1);
+    net::Tensor<float> I({2,3}, false); I.fill(1);
+    net::Tensor<float> w({3,3}, true); w.fill({1,2,3,4,5,6,7,8,9});
 
     x = x + I;
     x = net::matmul(x, w);
diff --git a/tests/subscripts.cpp b/tests/subscripts.cpp
deleted file mode 100644
index c43b9fa..0000000
--- a/tests/subscripts.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-#include "CaberNet.h"
-#include <gtest/gtest.h>
-#include <gmock/gmock.h>
-
-using ::testing::ElementsAre;
-
-TEST(subscripts, fill) {
-    net::Subscripts y({2, 3, 4});
-    y.fill(1);
-
-    EXPECT_THAT(y, ElementsAre(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1));
-}
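
Review note: tests/subscripts.cpp is removed without a successor, so integer-tensor fill coverage is gone. A candidate replacement targeting the new type, sketched here as a suggestion (hypothetical, not part of this patch):

    #include "CaberNet.h"
    #include <gtest/gtest.h>
    #include <gmock/gmock.h>

    using ::testing::Each;

    TEST(tensor_int16, fill) {
        net::Tensor<int16_t> y({2, 3, 4});
        y.fill(1);

        // 2 * 3 * 4 = 24 elements, all expected to hold 1.
        EXPECT_THAT(y, Each(1));
    }
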