Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Different data structures to represent layers #110

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,4 @@ PyTorch/sparseconvnet/SCN/__init__.py
sparseconvnet.egg-info
*.zip
*.rar
experiment
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
torch_dir = os.path.dirname(torch.__file__)
conda_include_dir = '/'.join(torch_dir.split('/')[:-4]) + '/include'

extra = {'cxx': ['-std=c++11', '-fopenmp'], 'nvcc': ['-std=c++11', '-Xcompiler', '-fopenmp']}
extra = {'cxx': ['-std=c++14', '-fopenmp'], 'nvcc': ['-std=c++14', '-Xcompiler', '-fopenmp']}

setup(
name='sparseconvnet',
Expand Down
1 change: 1 addition & 0 deletions sparseconvnet/SCN/CPU/Convolution.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
// LICENSE file in the root directory of this source tree.

// rows x groups x planes -> groups x rows x planes

template <typename T>
at::Tensor rule_index_select(at::Tensor &src, Int nRules, const Int *rules,
Int groups) {
Expand Down
39 changes: 39 additions & 0 deletions sparseconvnet/SCN/Metadata/32bits.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,16 +4,39 @@
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <array>
#include <cstddef>
#include <cstdint>
#include <limits>

// Using 32 bit integers for coordinates and memory calculations.

using Int = int32_t;

// Folly's twang_mix64: an invertible 64-bit bit-mixing function
// (see folly/hash/Hash.h). Scrambles the bits of `k` so that close-together
// inputs hash to widely separated outputs; used below as the per-coordinate
// hash inside FastHash.
inline uint64_t twang_mix64(uint64_t k) noexcept {
  k = (~k) + (k << 21);         // k = k * ((1 << 21) - 1) - 1
  k ^= k >> 24;
  k = k + (k << 3) + (k << 8);  // k = k * (1 + (1 << 3) + (1 << 8))
  k ^= k >> 14;
  k = k + (k << 2) + (k << 4);  // k = k * (1 + (1 << 2) + (1 << 4))
  k ^= k >> 28;
  k += k << 31;                 // k = k * (1 + (1 << 31))
  return k;
}

// Point<dimension> is a point in the d-dimensional integer lattice
// (i.e. square-grid/cubic-grid, ...)

template <Int dimension> using Point = std::array<Int, dimension>;

// Builds the sentinel Point that marks an unoccupied slot in the point hash
// map: every component is the smallest representable Int, a value assumed
// never to occur as a real lattice coordinate.
template <Int dimension> Point<dimension> generateEmptyKey() {
  Point<dimension> sentinel;
  sentinel.fill(std::numeric_limits<Int>::min());
  return sentinel;
}

template <Int dimension>
Point<dimension> LongTensorToPoint(/*long*/ at::Tensor &t) {
Point<dimension> p;
Expand Down Expand Up @@ -65,4 +88,20 @@ template <Int dimension> struct IntArrayHash {
}
};

// Hash functor for Point<dimension>, usable with std::unordered_map.
// NOTE: despite the seed value (16777619, the 32-bit FNV prime), this is
// not FNV: each coordinate is scrambled with twang_mix64 and then folded
// into the seed with Boost's hash_combine formula (golden-ratio constant
// 0x9e3779b9 plus shifted-seed terms).
template <Int dimension> struct FastHash {
  std::size_t operator()(Point<dimension> const &p) const {
    std::size_t seed = 16777619;

    for (auto x : p) {

      // Boost hash_combine step:
      //   seed ^= hash(x) + 0x9e3779b9 + (seed << 6) + (seed >> 2)
      // with twang_mix64 as hash(). A negative Int coordinate is
      // sign-extended to uint64_t before mixing, which is fine for hashing.
      seed ^= twang_mix64(x) + 0x9e3779b9 + (seed<<6) + (seed>>2);
    }

    return seed;
  }
};


#define at_kINT at::kInt
Loading