First steps working on TemplNet
Ithanil committed Apr 18, 2019
1 parent 3ea5fe7 commit d3ccf72
Showing 20 changed files with 267 additions and 64 deletions.
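Most of the changes are mechanical fallout of a header move: TemplNet.hpp and (judging by the updated include paths) FeedForwardNeuralNetwork.hpp are lifted from include/qnets/net/ into include/qnets/, with every include adjusted to match. The substantive new work is the rewritten TemplNet.hpp and the new PackHelpers.hpp.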
2 changes: 1 addition & 1 deletion benchmark/common/FFNNBenchmarks.hpp
@@ -3,7 +3,7 @@
#include <tuple>

#include "Timer.hpp"
#include "qnets/net/FeedForwardNeuralNetwork.hpp"
#include "qnets/FeedForwardNeuralNetwork.hpp"

inline double benchmark_FFPropagate(FeedForwardNeuralNetwork * const ffnn, const double * const xdata, const int neval)
{
2 changes: 1 addition & 1 deletion build.sh
@@ -2,7 +2,7 @@

. ./config.sh
mkdir -p build && cd build
-cmake -DCMAKE_CXX_COMPILER="${CXX_COMPILER}" -DUSER_CXX_FLAGS="${CXX_FLAGS}" -DUSE_COVERAGE="${USE_COVERAGE}" -DUSE_OPENMP="${USE_OPENMP}" -DGSL_ROOT_DIR="${GSL_ROOT}" ..
+cmake -DCMAKE_CXX_COMPILER="${CXX_COMPILER}" -DUSER_CXX_FLAGS="${CXX_FLAGS}" -DUSE_COVERAGE="${USE_COVERAGE}" -DUSE_OPENMP="${USE_OPENMP}" -DGSL_ROOT_DIR="${GSL_ROOT}" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON ..

if [ "$1" = "" ]; then
make -j$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || getconf _NPROCESSORS_ONLN 2>/dev/null)
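The added -DCMAKE_EXPORT_COMPILE_COMMANDS=ON makes CMake emit a compile_commands.json in the build directory, which tools such as clangd and clang-tidy can consume directly.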
File renamed without changes.
130 changes: 97 additions & 33 deletions include/qnets/net/TemplNet.hpp → include/qnets/TemplNet.hpp
@@ -1,9 +1,15 @@
 #ifndef FFNN_NET_TEMPLNET_HPP
 #define FFNN_NET_TEMPLNET_HPP
 
+#include "qnets/tool/PackHelpers.hpp"
+
 #include <array>
+#include <vector>
 #include <algorithm>
 #include <numeric>
 
+namespace templ
+{
+
 template <class IteratorT = double *>
 class LogACTF // Test Array ACTF
@@ -12,75 +18,133 @@ class LogACTF // Test Array ACTF
 
     static void f(IteratorT cur, IteratorT end, IteratorT out)
     {
-        for (; cur<end; ++cur, ++out) {
+        for (; cur < end; ++cur, ++out) {
             *out = 1./(1. + exp(-(*cur)));
         }
     }
 
     static void fd1(IteratorT cur, IteratorT end, IteratorT out)
     {
-        for (; cur<end; ++cur, ++out) {
+        for (; cur < end; ++cur, ++out) {
             *out = 1./(1. + exp(-(*cur))); // f
-            *out = *out * (1. - *out); // fd1
+            *out = *out*(1. - *out); // fd1
         }
     }
 
     static void fd2(IteratorT cur, IteratorT end, IteratorT out)
     {
-        for (; cur<end; ++cur, ++out) {
+        for (; cur < end; ++cur, ++out) {
             *out = 1./(1. + exp(-(*cur))); // f
-            *out = *out * (1. - *out) * (1. - 2.*(*out)); // fd2
+            *out = *out*(1. - *out)*(1. - 2.*(*out)); // fd2
         }
     }
 
     static void fad(IteratorT cur, IteratorT end, IteratorT outf, IteratorT outfd1, IteratorT outfd2)
     {
-        for (; cur<end; ++cur, ++outf, ++outfd1, ++outfd2) {
+        for (; cur < end; ++cur, ++outf, ++outfd1, ++outfd2) {
             *outf = 1./(1. + exp(-(*cur))); // f
-            *outfd1 = *outf * (1. - *outf); // fd1
-            *outfd2 = *outfd1 * (1. - 2.*(*outf)); // fd2
+            *outfd1 = *outf*(1. - *outf); // fd1
+            *outfd2 = *outfd1*(1. - 2.*(*outf)); // fd2
         }
     }
 };

+// --- Configuration "structs" (for user)
+
+// To configure "allocation" of derivative-related std::arrays.
+// You may still opt OUT of the derivative COMPUTATION dynamically
+// at run time, but the pre-reserved memory will always be kept.
+template <bool RESERVE_D1 = false, bool RESERVE_D2 = false, bool RESERVE_VD1 = false>
+struct DerivSetup
+{
+    static constexpr bool d1 = RESERVE_D1;
+    static constexpr bool d2 = RESERVE_D2;
+    static constexpr bool vd1 = RESERVE_VD1;
+};
+
+// to pass non-input layers as variadic parameter pack
+template <typename SizeT, SizeT NU, class ACTF>
+struct Layer
+{
+    static constexpr SizeT size = NU;
+    using actf = ACTF;
+};
 
 
-template<typename T, T ... ts>
-constexpr T const_sum() {
-    T result = 0;
-    for(auto &t : { ts... }) result += t;
-    return result;
+// --- Helpers
+
+// count total number of units across Layer pack (recursive)
+template <typename SizeT>
+constexpr SizeT countUnits() { return 0; } // terminate recursion
+template <typename SizeT, class Layer, class ... LAYERS>
+constexpr SizeT countUnits()
+{
+    return Layer::size + countUnits<SizeT, LAYERS...>(); // recurse until all layers' units are counted
 }
 
+// count total number of weights across Layer pack (recursive)
+template <typename SizeT>
+constexpr SizeT countNVP(SizeT/*prev_nunits*/) { return 0; } // terminate recursion
+template <typename SizeT, class Layer, class ... LAYERS>
+constexpr SizeT countNVP(SizeT prev_nunits)
+{
+    return Layer::size*(prev_nunits + 1/*offset*/) + countNVP<SizeT, LAYERS...>(Layer::size); // recurse
+}
+
+// check for empty layers across pack (pass true as the first argument)
+template <bool ret> constexpr bool hasNoEmptyLayer() { return ret; } // terminate recursion
+template <bool ret, class Layer, class ... LAYERS>
+constexpr bool hasNoEmptyLayer()
+{
+    return hasNoEmptyLayer<(ret && Layer::size > 0), LAYERS...>();
+}
 
-template<typename T, T ... ts>
-constexpr T const_prod() {
-    T result = 1;
-    for(auto &t : { ts... }) result *= t;
-    return result;
+// make array of nunits
+template<typename SizeT, class ... LAYERS>
+constexpr auto makeNUnitsArray(SizeT input_nunits)
+{
+    return std::array<SizeT, 1 + pack::count<SizeT, LAYERS...>()> { input_nunits, LAYERS::size... };
 }

-/*template <class SizeT = size_t, SizeT N_IN, SizeT N_OUT, std::initializer_list<SizeT> N_HID>
-constexpr calcNVP*/
+// --- The fully templated TemplNet FFNN
 
-template <typename SizeT, typename ValueT,
-          class HiddenACTF, class OutputACTF,
-          SizeT NU_IN, SizeT NU_OUT, SizeT NL_HID, SizeT ... NUs_HID>
+template <typename SizeT, typename ValueT, class DERIV_SETUP, SizeT NU_IN, class ... LAYERS>
 class TemplNet
 {
-private:
-    bool _flag_d1{}, _flag_d2{}, _flag_vd1{}; // flags that indicate if the substrates for the derivatives have been activated or not
+public:
+    // constexpr sizes
+    static constexpr SizeT nlayer = 1 /*IN*/ + pack::count<SizeT, LAYERS...>();
 
-    static constexpr SizeT _nvp = 0; // global number of variational parameters
+    // check for mistakes (statically)
+    static_assert(nlayer > 2, "[TemplNet] nlayer <= 2");
+    static_assert(hasNoEmptyLayer<(NU_IN > 0), LAYERS...>(), "[TemplNet] Layer pack contains empty Layer.");
 
-public:
-    static constexpr SizeT NLAYERS = 2 + sizeof...(NUs_HID);
-    static constexpr SizeT NUNITS = NU_IN + const_sum<SizeT, NUs_HID...>() + NU_OUT;
+    static constexpr SizeT nunit_tot = NU_IN + countUnits<SizeT, LAYERS...>();
+    static constexpr SizeT nvp_tot = countNVP<SizeT, LAYERS...>(NU_IN);
+    static constexpr SizeT nlink_tot = nvp_tot - nunit_tot + NU_IN; // subtract offsets
+
+    // nunit array
+    static constexpr std::array<SizeT, nlayer> nunits = makeNUnitsArray<SizeT, LAYERS...>(NU_IN);
+
+    // input array
+    std::array<ValueT, NU_IN> input{};
 
-    constexpr TemplNet() = default;
-    constexpr TemplNet(bool flag_d1, bool flag_d2, bool flag_vd1):
-        _flag_d1(flag_d1), _flag_d2(flag_d2), _flag_vd1(flag_vd1) {}
+    // output array (const ref)
+    const std::array<ValueT, nunits.back()> &output;
+
+private:
+    std::array<ValueT, nunits.back()> _output{};
+
+    bool _flag_d1{}, _flag_d2{}, _flag_vd1{}; // flags that indicate if derivatives should currently be calculated or not
+
+public:
+    constexpr TemplNet(): output(_output), _flag_d1(DERIV_SETUP::d1), _flag_d2(DERIV_SETUP::d2), _flag_vd1(DERIV_SETUP::vd1) {}
+    constexpr TemplNet(bool flag_d1, bool flag_d2, bool flag_vd1): TemplNet()
+    {
+        _flag_d1 = flag_d1;
+        _flag_d2 = flag_d2;
+        _flag_vd1 = flag_vd1;
+    }
 
     // --- Get information about the NN structure
     /*
@@ -159,6 +223,6 @@ class TemplNet
     // --- Store FFNN on file
     //void storeOnFile(const char * filename, bool store_betas = true);
 };

+} // templ

#endif
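
To make the new interface concrete, here is a minimal usage sketch (not part of the commit): the layer sizes, the choice of int as SizeT, and the public visibility of LogACTF's static methods are illustrative assumptions, and a C++17 compiler is assumed since the header relies on constexpr std::array::back(). With NU_IN = 2 and layers of 4 and 1 units, the helpers give nunit_tot = 2 + 4 + 1 = 7, nvp_tot = 4*(2+1) + 1*(4+1) = 17 and nlink_tot = 17 - 7 + 2 = 12:

#include <array>
#include <cmath>
#include <iostream>
#include "qnets/TemplNet.hpp"

using ACTF = templ::LogACTF<double *>;
using Net = templ::TemplNet<int, double, templ::DerivSetup<true, false, false>,
                            2, templ::Layer<int, 4, ACTF>, templ::Layer<int, 1, ACTF>>;

// the counting helpers all run at compile time, so the sizes can be checked statically
static_assert(Net::nlayer == 3, "input + 2 layers");
static_assert(Net::nunit_tot == 7, "2 + 4 + 1 units");
static_assert(Net::nvp_tot == 17, "4*(2+1) + 1*(4+1) weights, offsets included");
static_assert(Net::nlink_tot == 12, "17 - 7 + 2 links");

int main()
{
    // the activation functions operate on iterator (here pointer) ranges
    std::array<double, 3> in{-1., 0., 1.}, out{};
    ACTF::f(in.data(), in.data() + in.size(), out.data());
    for (double v : out) { std::cout << v << " "; } // prints roughly 0.269 0.5 0.731
    std::cout << '\n';
    return 0;
}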
2 changes: 1 addition & 1 deletion include/qnets/feed/SmartBetaGenerator.hpp
@@ -4,7 +4,7 @@
#include "qnets/feed/FeederInterface.hpp"
#include "qnets/feed/NNRay.hpp"
#include "qnets/layer/FedLayer.hpp"
#include "qnets/net/FeedForwardNeuralNetwork.hpp"
#include "qnets/FeedForwardNeuralNetwork.hpp"


namespace smart_beta
2 changes: 1 addition & 1 deletion include/qnets/io/PrintUtilities.hpp
@@ -2,7 +2,7 @@
#define FFNN_IO_PRINTUTILITIES_HPP


#include "qnets/net/FeedForwardNeuralNetwork.hpp"
#include "qnets/FeedForwardNeuralNetwork.hpp"


#include <string>
104 changes: 104 additions & 0 deletions include/qnets/tool/PackHelpers.hpp
@@ -0,0 +1,104 @@
#ifndef QNETS_TOOL_PACKTOOLS_HPP
#define QNETS_TOOL_PACKTOOLS_HPP

#include <functional>

namespace pack
{
// Collection of helpers for dealing with variadic template parameter packs
// Everything in here is meant for usage at compile-time!

// Template Parameter List (to allow multiple parameter packs per template)
template <typename ...>
struct list {};


// count pack and return count as desired integer type
template <typename SizeT, class ... Pack>
constexpr SizeT count() { return static_cast<SizeT>(sizeof...(Pack)); }


// --- Pack Sum

// sum of pack values
template <typename T, T ... ts>
constexpr T sum()
{
T result = 0;
for (auto &t : {ts...}) { result += t; }
return result;
}

// sum of pack values, with start and end
template <typename SizeT, typename T, T ... ts>
constexpr T sum(SizeT begin_index/*count from*/, SizeT end_index/*to before this*/)
{
static_assert(end_index <= count<SizeT, ts...>(), "[pack::sum] end_index > count(pack).");
T result = 0;
SizeT i = 0;
for (auto &t : {ts...}) {
if (i >= begin_index && i < end_index) {
result += t;
}
++i;
}
return result;
}

// --- Pack Product

// product of pack values
template <typename T, T ... ts>
constexpr T prod()
{
T result = 1;
for (auto &t : {ts...}) { result *= t; }
return result;
}

// product of pack values, with start and end
template <typename SizeT, typename T, T ... ts>
constexpr T prod(SizeT begin_index/*count from*/, SizeT end_index/*to before this*/)
{
static_assert(end_index <= count<SizeT, ts...>(), "[pack::prod] end_index > count(pack).");
T result = 1;
SizeT i = 0;
for (auto &t : {ts...}) {
if (i >= begin_index && i < end_index) {
result *= t;
}
++i;
}
return result;
}

// --- Accumulate Function

// accumulate function of pack values
template <typename ValueT, typename T, T ... ts>
constexpr ValueT accumulate(std::function<ValueT(const T&)> func)
{
ValueT result = 0;
for (auto &t : {ts...}) { result += func(t); }
return result;
}

// accumulate function of pack values, with start and end
template <typename SizeT, typename ValueT, typename T, T ... ts>
constexpr ValueT accumulate(SizeT begin_index/*count from*/, SizeT end_index/*to before this*/, std::function<ValueT(const T&)> func)
{
static_assert(end_index <= count<SizeT, ts...>(), "[pack::accumulate] end_index > count(pack).");
ValueT result = 0;
SizeT i = 0;
for (auto &t : {ts...}) {
if (i >= begin_index && i < end_index) {
result += func(t);
}
++i;
}
return result;
}

} // pack

#endif
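
A small compile-time sketch of the helpers above (illustrative, not part of the commit). Only the whole-pack overloads are exercised: as committed, the ranged overloads static_assert on end_index, a runtime parameter rather than a constant expression, so calls to them will not compile as-is:

#include "qnets/tool/PackHelpers.hpp"

// count() reports the pack length in the requested integer type
static_assert(pack::count<int, double, char, float>() == 3, "three types in the pack");
// sum() and prod() fold a compile-time value pack
static_assert(pack::sum<int, 1, 2, 3, 4>() == 10, "1 + 2 + 3 + 4");
static_assert(pack::prod<int, 2, 3, 4>() == 24, "2 * 3 * 4");

int main() { return 0; }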
2 changes: 1 addition & 1 deletion include/qnets/train/NNTrainer.hpp
@@ -2,7 +2,7 @@
#define FFNN_TRAIN_NNTRAINER_HPP

#include "qnets/io/PrintUtilities.hpp"
#include "qnets/net/FeedForwardNeuralNetwork.hpp"
#include "qnets/FeedForwardNeuralNetwork.hpp"
#include "qnets/train/NNTrainingConfig.hpp"
#include "qnets/train/NNTrainingData.hpp"
#include <cstddef> // NULL
2 changes: 1 addition & 1 deletion include/qnets/train/NNTrainerGSL.hpp
@@ -1,7 +1,7 @@
#ifndef FFNN_TRAIN_NNTRAINERGSL_HPP
#define FFNN_TRAIN_NNTRAINERGSL_HPP

#include "qnets/net/FeedForwardNeuralNetwork.hpp"
#include "qnets/FeedForwardNeuralNetwork.hpp"
#include "qnets/train/NNTrainer.hpp"
#include "qnets/train/NNTrainingConfig.hpp"
#include "qnets/train/NNTrainingData.hpp"
2 changes: 1 addition & 1 deletion include/qnets/train/NNTrainingConfig.hpp
@@ -1,7 +1,7 @@
#ifndef FFNN_TRAIN_NNTRAININGCONFIG_HPP
#define FFNN_TRAIN_NNTRAININGCONFIG_HPP

#include "qnets/net/FeedForwardNeuralNetwork.hpp"
#include "qnets/FeedForwardNeuralNetwork.hpp"

// holds the required configuration parameters for the trainer
struct NNTrainingConfig
2 changes: 1 addition & 1 deletion src/CMakeLists.txt
@@ -1,4 +1,4 @@
-file(GLOB SOURCES "*/*.cpp")
+file(GLOB SOURCES "*.cpp" "*/*.cpp")
add_library(qnets SHARED ${SOURCES})
target_link_libraries(qnets "${GSL_LIBRARIES}" "${OpenMP_CXX_LIBRARIES}") # shared libs
add_library(qnets_static STATIC ${SOURCES})
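The widened glob additionally matches .cpp files placed directly in src/, presumably to cover sources moved up a level alongside the flattened include layout.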
@@ -1,4 +1,4 @@
#include "qnets/net/FeedForwardNeuralNetwork.hpp"
#include "qnets/FeedForwardNeuralNetwork.hpp"

#include <algorithm>
#include <fstream>
2 changes: 1 addition & 1 deletion test/common/checkDerivatives.hpp
@@ -2,7 +2,7 @@
#include <cmath>
#include <iostream>

#include "qnets/net/FeedForwardNeuralNetwork.hpp"
#include "qnets/FeedForwardNeuralNetwork.hpp"

inline void checkDerivatives(FeedForwardNeuralNetwork * const ffnn, const double &TINY)
{
2 changes: 1 addition & 1 deletion test/common/checkStoreOnFile.hpp
@@ -2,7 +2,7 @@
#include <cmath>
#include <iostream>

#include "qnets/net/FeedForwardNeuralNetwork.hpp"
#include "qnets/FeedForwardNeuralNetwork.hpp"

// expects a neural network without substrates but optionally with connection
inline void checkStoreOnFile(FeedForwardNeuralNetwork * const ffnn, const bool isConnected = false)
2 changes: 1 addition & 1 deletion test/main.cpp
@@ -2,7 +2,7 @@
#include <fstream>
#include <iostream>

#include "qnets/net/FeedForwardNeuralNetwork.hpp"
#include "qnets/FeedForwardNeuralNetwork.hpp"


void printNNStructure(FeedForwardNeuralNetwork &nn)