This repository has been archived by the owner on Jul 24, 2024. It is now read-only.

Commit: Changes (#34)
* static polymorphism for layers

* refactored internal array

* added templates

* changes
mr-raccoon-97 authored Oct 5, 2023
1 parent c7b3641 commit 7538f66
Showing 30 changed files with 513 additions and 325 deletions.
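At the API surface, the commit's net effect is that net::Tensor becomes a class template over its scalar type, and integer tensors replace the removed net::Subscripts. A minimal sketch of the new call sites; the constructor and fill signatures are assumed from the examples in this diff, not independently confirmed:

#include <CaberNet/CaberNet.h>

int main() {
    // The scalar type is now an explicit template argument.
    net::Tensor<float> x({2, 3}, false); // second argument: requires_gradient
    x.fill({1, 2, 3, 4, 5, 6});

    // Integer tensors stand in for the removed net::Subscripts,
    // e.g. as class-index targets for a criterion (constructor assumed).
    net::Tensor<int> targets({2, 1}, false);
}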
6 changes: 4 additions & 2 deletions CMakeLists.txt
@@ -10,18 +10,20 @@ set(SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/src)
set(HEADERS
${INCLUDE_DIR}/CaberNet.h
-${INCLUDE_DIR}/CaberNet/tensor.h
+${INCLUDE_DIR}/CaberNet/tensor/tensor_float32.h
+${INCLUDE_DIR}/CaberNet/tensor/tensor_int16.h
${INCLUDE_DIR}/CaberNet/functions.h
${INCLUDE_DIR}/CaberNet/layers.h
${INCLUDE_DIR}/CaberNet/model.h
${INCLUDE_DIR}/CaberNet/subscripts.h
${INCLUDE_DIR}/CaberNet/criterions.h
)

set(SOURCES
-${SOURCE_DIR}/tensor.cpp
+${SOURCE_DIR}/tensor/tensor_float32.cpp
+${SOURCE_DIR}/tensor/tensor_int16.cpp
${SOURCE_DIR}/functions.cpp
${SOURCE_DIR}/layers.cpp
${SOURCE_DIR}/subscripts.cpp
${SOURCE_DIR}/criterions.cpp
${SOURCE_DIR}/internals/operations/internal_operation_addition.cpp
${SOURCE_DIR}/internals/operations/internal_operation_multiplication.cpp
20 changes: 5 additions & 15 deletions examples/functions.cpp
@@ -1,16 +1,6 @@
/*
-To run this code build the library:
-in your terminal:
-https://github.com/xEricCardozo/CaberNet-cpp.git
-and inside the folder:
-mkdir build
-cd build
-cmake ..
-make
+To run this code build the library following the instructions in the .github folder.
then compile this file with:
@@ -24,12 +14,12 @@ g++ functions.cpp -LCaberNet/lib -lCaberNet -I CaberNet/include

int main() {
// You can use enums to set the gradient requirement:
-net::Tensor x({2,3}, net::requires_gradient::False); x.fill({1,2,3,4,5,6});
-net::Tensor w({4,3}, net::requires_gradient::True); w.fill({1,2,-3,4,5,6,7,8,-9,10,11,-12});
+net::Tensor<float> x({2,3}, net::requires_gradient::False); x.fill({1,2,3,4,5,6});
+net::Tensor<float> w({4,3}, net::requires_gradient::True); w.fill({1,2,-3,4,5,6,7,8,-9,10,11,-12});

// Or use just a boolean. Whatever you prefer.
-net::Tensor b({1,4}, true); b.fill({1,2,3,4});
-net::Tensor I({2,4}, false); I.fill(1);
+net::Tensor<float> b({1,4}, true); b.fill({1,2,3,4});
+net::Tensor<float> I({2,4}, false); I.fill(1);

x = net::function::linear(x,w,b);
x = net::function::relu(x);
17 changes: 14 additions & 3 deletions examples/layers.cpp
@@ -1,3 +1,14 @@
+/*
+To run this code build the library following the instructions in the .github folder.
+then compile this file with:
+g++ layers.cpp -LCaberNet/lib -lCaberNet -I CaberNet/include
+./a.out
+*/

#include <CaberNet/CaberNet.h>

struct Autoencoder : public net::Model<Autoencoder> {
@@ -17,7 +28,7 @@ struct Autoencoder : public net::Model<Autoencoder> {
net::layer::LogSoftmax(1/*axis*/)
};

-net::Tensor forward(net::Tensor x) {
+net::Tensor<float> forward(net::Tensor<float> x) {
x = encoder(x);
x = decoder(x);
return x;
@@ -27,8 +38,8 @@ struct Autoencoder : public net::Model<Autoencoder> {

int main() {
Autoencoder model;
-net::Tensor x({1, 784}); x.fill(net::initializer::He);
-net::Tensor y = model(x);
+net::Tensor<float> x({1, 784}); x.fill(net::initializer::He);
+net::Tensor<float> y = model(x);
y.perform();
std::cout << y;
}
22 changes: 6 additions & 16 deletions examples/operations.cpp
@@ -1,16 +1,6 @@
/*
-To run this code build the library:
-in your terminal:
-https://github.com/xEricCardozo/CaberNet-cpp.git
-and inside the folder:
-mkdir build
-cd build
-cmake ..
-make
+To run this code build the library following the instructions in the .github folder.
then compile this file with:
@@ -23,11 +13,11 @@ g++ operations.cpp -LCaberNet/lib -lCaberNet -I CaberNet/include
#include <CaberNet/CaberNet.h>

int main() {
-net::Tensor x({2,3}, false); x.fill({1,2,3,4,5,6});
-net::Tensor y({2,3}, true); y.fill({1,1,1,-1,-1,-1});
-net::Tensor z({2,3}, true); z.fill(1);
-net::Tensor I({2,3}, false); I.fill(1);
-net::Tensor w({3,3}, true); w.fill({1,2,3,4,5,6,7,8,9});
+net::Tensor<float> x({2,3}, false); x.fill({1,2,3,4,5,6});
+net::Tensor<float> y({2,3}, true); y.fill({1,1,1,-1,-1,-1});
+net::Tensor<float> z({2,3}, true); z.fill(1);
+net::Tensor<float> I({2,3}, false); I.fill(1);
+net::Tensor<float> w({3,3}, true); w.fill({1,2,3,4,5,6,7,8,9});

x = x + I;
x = net::matmul(x, w);
110 changes: 110 additions & 0 deletions in-process/idea.cpp
@@ -0,0 +1,110 @@
#include <iostream>
#include <vector>
#include <memory>

namespace internal {

// Type-erased base: buffers of different scalar types can hide behind one pointer type.
class Base {
public:
virtual ~Base() = default;
};

// Owning contiguous storage for a single scalar type; filled with ones for demonstration.
template<typename T>
class Array : Base {
public:
using scalar_type = T;
using pointer = scalar_type*;
using const_pointer = const scalar_type*;

using storage_type = std::vector<scalar_type>;
using iterator = typename storage_type::iterator;
using const_iterator = typename storage_type::const_iterator;

Array() = default;
Array(std::size_t size) : data_(size) {
for (std::size_t i = 0; i < size; ++i) data_[i] = 1;
}

storage_type data_;
};

// Concrete float buffer used by the float_32 front-end below.
class Tensor : public Array<float> {
public:
Tensor() = default;
Tensor(std::size_t size) : Array(size) {}
};

} // namespace internal


namespace net {

// Public handle sharing ownership of an internal integer buffer; exposes iteration only.
class integer_32 {
public:
using scalar_type = int;
using iterator = std::vector<scalar_type>::iterator;
using const_iterator = std::vector<scalar_type>::const_iterator;

integer_32(std::size_t size) {
tensor_ = std::make_shared<internal::Array<scalar_type>>(size);
}

iterator begin() { return tensor_->data_.begin(); }
iterator end() { return tensor_->data_.end(); }

const_iterator begin() const { return tensor_->data_.cbegin(); }
const_iterator end() const { return tensor_->data_.cend(); }

private:
std::shared_ptr<internal::Array<scalar_type>> tensor_;
};

// Analogous to integer_32, but backed by the concrete internal::Tensor.
class float_32 {
public:
using scalar_type = float;
using iterator = std::vector<float>::iterator;
using const_iterator = std::vector<float>::const_iterator;

float_32(std::size_t size) {
tensor_ = std::make_shared<internal::Tensor>(size);
}

iterator begin() { return tensor_->data_.begin(); }
iterator end() { return tensor_->data_.end(); }

const_iterator begin() const { return tensor_->data_.cbegin(); }
const_iterator end() const { return tensor_->data_.cend(); }

private:
std::shared_ptr<internal::Tensor> tensor_;
};

// Generic front-end: by default just holds a shared internal buffer.
template<typename T = float>
class Tensor {
public:
using scalar_type = T;

private:
std::shared_ptr<internal::Array<T>> data_;
};

// Full specialization: float tensors reuse the float_32 front-end.
template<>
class Tensor<float> : public float_32 {
public:
Tensor(std::size_t size) : float_32(size) {
std::cout << "i'm a specialization";
}
};

// Free function used below to exercise the specialization.
Tensor<float> fn(Tensor<float> x) {
return x;
}


} // namespace net

int main() {
net::Tensor<float> tensor(10);
net::Tensor tensor2 = net::fn(tensor);
for(auto i : tensor2) std::cout << i << std::endl;
}
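The sketch explores the refactor named in the commit message: a type-erased internal::Base lets arrays of different scalar types sit behind one pointer type, each public handle (integer_32, float_32, and the Tensor template) shares ownership of its buffer through a std::shared_ptr, and the Tensor<float> specialization shows how one scalar type can be given dedicated behavior while the generic interface stays intact.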
3 changes: 2 additions & 1 deletion in-process/usless_datareader.hpp
@@ -10,9 +10,10 @@
#include <list>

/*
+#THIS NEEDS A COMPLETE REWRITE
Reads a dataset from a csv file.
Returns a pair of vectors, the first one containing the features and the second one the targets.
This is a very simple implementation, it is not optimized for speed, but it is enough for the purpose of this project.
It's header only, so it can be included in the main file easily.
*/

Expand Down
1 change: 0 additions & 1 deletion include/CaberNet.h
@@ -3,5 +3,4 @@
#include "CaberNet/functions.h"
#include "CaberNet/layers.h"
#include "CaberNet/model.h"
#include "CaberNet/subscripts.h"
#include "CaberNet/criterions.h"
5 changes: 2 additions & 3 deletions include/CaberNet/criterions.h
@@ -4,7 +4,6 @@
#include <memory>

#include "tensor.h"
#include "subscripts.h"

namespace internal {
class Criterion;
@@ -15,8 +14,8 @@ namespace net::criterion {
class NegativeLogLikelihood {
public:
~NegativeLogLikelihood();
-NegativeLogLikelihood(Tensor output, Subscripts targets);
-Tensor::scalar_type loss() const;
+NegativeLogLikelihood(Tensor<float> output, Tensor<int> targets);
+float loss() const;

private:
std::unique_ptr<internal::Criterion> criterion_;
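With Subscripts gone, the criterion consumes an integer tensor directly. A hedged usage sketch follows; the Tensor<int> constructor and the shapes are assumptions mirroring the float examples above, while the criterion's constructor and float return of loss() come straight from this diff:

#include <CaberNet/CaberNet.h>

int main() {
    net::Tensor<float> output({2, 4}, true); // assumed: log-probabilities per class
    net::Tensor<int> targets({2, 1}, false); // assumed: one class index per row
    net::criterion::NegativeLogLikelihood criterion(output, targets);
    float value = criterion.loss();          // loss() now returns a plain float
    (void)value;
}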
8 changes: 4 additions & 4 deletions include/CaberNet/functions.h
@@ -3,9 +3,9 @@

namespace net::function {

-Tensor linear(const Tensor& input, const Tensor& weight, const Tensor& bias);
-Tensor softmax(Tensor& input, int axis);
-Tensor log_softmax(Tensor& input, int axis);
-Tensor relu(const Tensor& input);
+Tensor<float> linear(const Tensor<float>& input, const Tensor<float>& weight, const Tensor<float>& bias);
+Tensor<float> softmax(Tensor<float>& input, int axis);
+Tensor<float> log_softmax(Tensor<float>& input, int axis);
+Tensor<float> relu(const Tensor<float>& input);

} // namespace net::function
20 changes: 9 additions & 11 deletions include/CaberNet/layers.h
@@ -15,29 +15,29 @@ class Linear : public Model<Linear> {
size_type input_features,
size_type output_features,
initializer distribution = initializer::He );

-Tensor forward(Tensor x);
+Tensor<float> forward(Tensor<float> x);

private:
-Tensor weight_;
-Tensor bias_;
+Tensor<float> weight_;
+Tensor<float> bias_;
};

struct ReLU : public Model<ReLU> {
ReLU() = default;
-Tensor forward(Tensor input);
+Tensor<float> forward(Tensor<float> input);
};

struct Softmax : public Model<Softmax> {
int axis;
Softmax(int axis);
-Tensor forward(Tensor input);
+Tensor<float> forward(Tensor<float> input);
};

struct LogSoftmax : public Model<LogSoftmax> {
int axis;
LogSoftmax(int axis);
-Tensor forward(Tensor input);
+Tensor<float> forward(Tensor<float> input);
};


@@ -55,16 +55,14 @@ class Sequence : public Model<Sequence> {
Sequence(Layers&& ... layers) {
layers_ = { std::forward<Layers>(layers)... };
}


-Tensor forward(Tensor input) {
+Tensor<float> forward(Tensor<float> input) {
for (auto& layer : layers_) {
input = std::visit([input](auto&& argument) { return argument.forward(input); }, layer);
}
return input;
}


private:
std::vector<layer_variant> layers_;
};
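Sequence stores its layers in a std::variant and dispatches with std::visit, which requires every alternative to expose a forward with a compatible signature. A stripped-down, self-contained illustration of the mechanism; Scale and Shift are hypothetical stand-ins, not library types:

#include <iostream>
#include <variant>
#include <vector>

struct Scale { float factor; float forward(float x) const { return x * factor; } };
struct Shift { float offset; float forward(float x) const { return x + offset; } };

using layer_variant = std::variant<Scale, Shift>;

int main() {
    std::vector<layer_variant> layers { Scale{2.0f}, Shift{1.0f} };
    float x = 3.0f;
    for (auto& layer : layers) {
        // std::visit selects the concrete overload per alternative; no virtual calls.
        x = std::visit([x](auto&& l) { return l.forward(x); }, layer);
    }
    std::cout << x << '\n'; // (3 * 2) + 1 = 7
}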
2 changes: 1 addition & 1 deletion include/CaberNet/model.h
@@ -12,7 +12,7 @@ class Model {
using size_type = std::size_t;
using shape_type = std::vector<size_t>;

-Tensor operator()(Tensor input) {
+Tensor<float> operator()(Tensor<float> input) {
return static_cast<Derived*>(this)->forward(input);
}
};
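Model<Derived> is the "static polymorphism" the commit title refers to: a CRTP base that downcasts to the concrete layer at compile time, so operator() reaches Derived::forward without virtual dispatch. A minimal self-contained sketch of the pattern, using float instead of Tensor<float> to keep it dependency-free:

#include <iostream>

template <class Derived>
struct Model {
    float operator()(float input) {
        // Static dispatch: Derived is known at compile time.
        return static_cast<Derived*>(this)->forward(input);
    }
};

struct Doubler : Model<Doubler> {
    float forward(float x) { return 2.0f * x; }
};

int main() {
    Doubler layer;
    std::cout << layer(21.0f) << '\n'; // prints 42
}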
[Diffs for the remaining 19 changed files are not shown.]
