Skip to content
This repository has been archived by the owner on Jul 24, 2024. It is now read-only.

Commit

Permalink
templates
Browse files Browse the repository at this point in the history
  • Loading branch information
mr-raccoon-97 committed Oct 6, 2023
1 parent 998fdb2 commit f657790
Show file tree
Hide file tree
Showing 13 changed files with 155 additions and 175 deletions.
8 changes: 4 additions & 4 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,8 @@ set(SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/src)
set(HEADERS
${INCLUDE_DIR}/CaberNet.h
${INCLUDE_DIR}/CaberNet/tensor.h
${INCLUDE_DIR}/CaberNet/tensor/tensor_float32.h
${INCLUDE_DIR}/CaberNet/tensor/tensor_int16.h
${INCLUDE_DIR}/CaberNet/tensor/tensor_float.h
${INCLUDE_DIR}/CaberNet/tensor/tensor_int.h
${INCLUDE_DIR}/CaberNet/functions.h
${INCLUDE_DIR}/CaberNet/layers.h
${INCLUDE_DIR}/CaberNet/model.h
Expand All @@ -20,8 +20,8 @@ set(HEADERS

set(SOURCES
${SOURCE_DIR}/tensor.cpp
${SOURCE_DIR}/tensor/tensor_float32.cpp
${SOURCE_DIR}/tensor/tensor_int16.cpp
${SOURCE_DIR}/tensor/tensor_float.cpp
${SOURCE_DIR}/tensor/tensor_int.cpp
${SOURCE_DIR}/functions.cpp
${SOURCE_DIR}/layers.cpp
${SOURCE_DIR}/criterions.cpp
Expand Down
2 changes: 1 addition & 1 deletion LICENSE
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2023 Jiahao Li
Copyright (c) 2023 Eric Cardozo

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

## Join the Discord:

https://discord.gg/5Z7Xfs8Q
https://discord.gg/QJsKT82a

## Description

Expand Down
18 changes: 7 additions & 11 deletions include/CaberNet/tensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,25 +4,21 @@
#include <vector>
#include <memory>

#include "tensor/tensor_float32.h"
#include "tensor/tensor_int16.h"
#include "tensor/tensor_float.h"
#include "tensor/tensor_int.h"

namespace net {

template<typename T = float>
class Tensor {
public:
Tensor() { throw std::runtime_error("Bad type or not implemented type."); }
};
template<typename> class Tensor;

template<>
struct Tensor<float> : public TensorFloat32 {
using TensorFloat32::TensorFloat32;
struct Tensor<float> : public TensorFloat {
using TensorFloat::TensorFloat;
};

template<>
struct Tensor<int> : public TensorInt16 {
using TensorInt16::TensorInt16;
struct Tensor<int> : public TensorInt {
using TensorInt::TensorInt;
};

std::ostream& operator<<(std::ostream& ostream, const Tensor<float>& tensor);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ namespace net {

template<typename T> class Tensor;

class TensorFloat32 {
class TensorFloat {
public:
using value_type = float; // Needed for GMock's built-in matches
using pointer = value_type*;
Expand All @@ -25,10 +25,10 @@ class TensorFloat32 {
using iterator = storage_type::iterator;
using const_iterator = storage_type::const_iterator;

TensorFloat32() = default;
TensorFloat32(std::shared_ptr<internal::Tensor> tensor);
TensorFloat32(shape_type shape, bool gradient_requirement = false);
TensorFloat32(shape_type shape, requires_gradient gradient_requirement);
TensorFloat() = default;
TensorFloat(std::shared_ptr<internal::Tensor> tensor);
TensorFloat(shape_type shape, bool gradient_requirement = false);
TensorFloat(shape_type shape, requires_gradient gradient_requirement);

void reshape(shape_type shape);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,9 @@ namespace net {

template<typename T> class Tensor;

class TensorInt16 {
class TensorInt {
public:
using value_type = int16_t;
using value_type = int;
using pointer = value_type*;
using const_pointer = const value_type*;

Expand All @@ -21,9 +21,9 @@ class TensorInt16 {
using iterator = std::vector<value_type>::iterator;
using const_iterator = std::vector<value_type>::const_iterator;

TensorInt16() = default;
TensorInt16(std::shared_ptr<internal::Array<value_type>> subscripts);
TensorInt16(shape_type shape);
TensorInt() = default;
TensorInt(std::shared_ptr<internal::Array<value_type>> subscripts);
TensorInt(shape_type shape);

void reshape(shape_type shape);
void fill(value_type value);
Expand Down
4 changes: 2 additions & 2 deletions src/internals/config.h
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
/**
/*
Note: This file is used to configure the internal library.
Eigen is used as the default backend for the library, and should be included only
in the cpp files, not in the h or hpp files.
This is for making the implementation of the operations independent of the internal
library. The internal library should be able to use any backend, and the user should
be able to choose the backend when compiling the library.
**/
*/

#ifndef INTERNAL_CONFIG_H
#define INTERNAL_CONFIG_H
Expand Down
8 changes: 4 additions & 4 deletions src/internals/criterions/internal_criterions.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ class Criterion {
using shape_type = Tensor::shape_type;
using scalar_type = Tensor::scalar_type;

Criterion(Tensor* output, Array<int16_t>* targets) {
Criterion(Tensor* output, Array<int>* targets) {
output_ = output;
targets_ = targets;
}
Expand All @@ -23,20 +23,20 @@ class Criterion {
virtual scalar_type loss() const = 0;

Tensor* output() const { return output_; }
Array<int16_t>* targets() const { return targets_; }
Array<int>* targets() const { return targets_; }

size_type number_of_classes() const { return output()->size() / batch_size(); }
size_type batch_size() const { return output()->shape().front(); }

private:
Tensor* output_;
Array<int16_t>* targets_;
Array<int>* targets_;
};

class NLLLoss : public Criterion {
public:
~NLLLoss() final = default;
NLLLoss(Tensor* output, Array<int16_t>* targets) : Criterion(output, targets) {}
NLLLoss(Tensor* output, Array<int>* targets) : Criterion(output, targets) {}
scalar_type loss() const final;
};

Expand Down
6 changes: 2 additions & 4 deletions src/internals/internal_expression.hpp
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
/*************************************************************************************************\
/*
This is just an interface class for the non-leaf nodes of the computational graph.
/*************************************************************************************************/
*/

#ifndef INTERNAL_EXPRESSION_HPP
#define INTERNAL_EXPRESSION_HPP
Expand Down
81 changes: 81 additions & 0 deletions src/tensor/tensor_float.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
#include "../../include/CaberNet/tensor/tensor_float.h"
#include "../../include/CaberNet/tensor.h"

#include <cmath>
#include <memory>

#include "../internals/internal_tensor.hpp"
#include "../internals/internal_graph.hpp"
#include "../internals/operations/internal_operations.hpp"

namespace net {

/// Wraps an existing internal tensor node and registers it with the
/// computational graph.
/// @param tensor shared handle to the internal tensor; ownership is shared
///        with the graph.
TensorFloat::TensorFloat(std::shared_ptr<internal::Tensor> tensor)
:   tensor_(std::move(tensor)) { // move instead of copy: avoids an extra atomic ref-count bump
    internal::Graph::add(tensor_);
}

/// Creates a fresh tensor node with the given shape and registers it with
/// the computational graph.
/// @param shape dimensions of the new tensor.
/// @param gradient_requirement true if gradients should be tracked for this node.
TensorFloat::TensorFloat(shape_type shape, bool gradient_requirement) {
    auto node = std::make_shared<internal::Tensor>(shape);
    node->requires_gradient(gradient_requirement);
    tensor_ = node;
    internal::Graph::add(tensor_);
}

/// Creates a fresh tensor node with the given shape.
/// @param shape dimensions of the new tensor.
/// @param gradient_requirement strongly-typed gradient flag; delegated to
///        the boolean overload so the node-creation logic lives in one place.
TensorFloat::TensorFloat(shape_type shape, requires_gradient gradient_requirement)
:   TensorFloat(shape, static_cast<bool>(gradient_requirement)) {}

// Reshapes the tensor in place, lazily creating the internal node if it
// does not exist yet.
// NOTE(review): the lazily created node is constructed with `shape` and then
// immediately reshaped to the same `shape` — looks redundant, but the extra
// constructor flags (false, false) may carry meaning; confirm against the
// internal::Tensor constructor before simplifying.
void TensorFloat::reshape(shape_type shape) {
if(tensor_ == nullptr) tensor_ = std::make_shared<internal::Tensor>(shape, false, false);
tensor_-> reshape(shape);
}

// Returns a detached copy of this tensor's accumulated gradient. The copy
// is built as a new internal node (no gradient tracking of its own) and the
// gradient buffer's contents are copied element-wise into it.
// NOTE(review): assumes tensor_->gradient() is non-null — presumably
// backward() must have run first; confirm before calling this earlier.
Tensor<float> TensorFloat::gradient() const {
Tensor<float> gradient = std::make_shared<internal::Tensor>(shape(), false);
std::copy(tensor_->gradient()->begin(), tensor_->gradient()->end(), gradient.begin());
return gradient;
}

// Non-owning access to the underlying internal tensor node.
// NOTE(review): the const overload hands out a non-const pointer, so callers
// can mutate the internals of a const TensorFloat — flagged here; fixing it
// would change the public return type, which callers may depend on.
internal::Tensor* TensorFloat::internal() const {return tensor_.get(); }
internal::Tensor* TensorFloat::internal() { return tensor_.get(); }

// Propagates `gradient` backwards through the graph starting at this node.
void TensorFloat::backward(const Tensor<float>& gradient) { tensor_-> backward(gradient.internal()); }
// Runs the forward pass for this node.
void TensorFloat::perform() { tensor_-> forward(); } // TODO : this should have a return type.

// Iterator access, forwarded straight to the internal tensor's storage.
TensorFloat::iterator TensorFloat::begin() { return tensor_->begin(); }
TensorFloat::iterator TensorFloat::end() { return tensor_->end(); }
TensorFloat::const_iterator TensorFloat::begin() const { return tensor_->begin(); }
TensorFloat::const_iterator TensorFloat::end() const { return tensor_->end(); }
TensorFloat::const_iterator TensorFloat::cbegin() const { return tensor_->cbegin(); }
TensorFloat::const_iterator TensorFloat::cend() const { return tensor_->cend(); }

// Raw-data and metadata accessors, all forwarded to the internal tensor.
TensorFloat::pointer TensorFloat::data() { return tensor_->data(); }
TensorFloat::const_pointer TensorFloat::data() const { return tensor_->data(); }
TensorFloat::shape_type TensorFloat::shape() const { return tensor_->shape(); }
TensorFloat::size_type TensorFloat::rank() const { return tensor_->rank(); }

/// Fills the tensor with values drawn from a named initialization scheme.
/// @param distribution the initializer to apply; only He initialization is
///        currently implemented.
/// @throws std::runtime_error for any unimplemented initializer.
void TensorFloat::fill(initializer distribution) {
    // unique_ptr replaces the original raw new/delete pair: the raw version
    // leaked the filler if generate() threw, and freed it only on the happy path.
    std::unique_ptr<distribution::Distribution<value_type>> filler;
    switch (distribution) {
        case initializer::He :
            // He initialization: N(0, sqrt(2 / fan_in)), taking the last
            // shape dimension as fan-in.
            filler = std::make_unique<distribution::Normal<value_type>>(0, std::sqrt(2.0 / shape().back()));
            for (auto& element : *this) element = filler->generate();
            break;

        default :
            throw std::runtime_error("Invalid initializer");
    }
}

/// Sets every element of the tensor's storage to `value`.
void TensorFloat::fill(value_type value) {
    for (auto& element : *tensor_) element = value;
}

void TensorFloat::fill(std::vector<value_type> values) {
std::move(values.begin(), values.end(), tensor_->begin());
}

} // namespace net

95 changes: 0 additions & 95 deletions src/tensor/tensor_float32.cpp

This file was deleted.

Loading

0 comments on commit f657790

Please sign in to comment.