Skip to content

Commit

Permalink
added clang-tidy script and applied suggested fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
Ithanil committed Feb 28, 2019
1 parent 2c03fa4 commit 3b540cd
Show file tree
Hide file tree
Showing 118 changed files with 1,532 additions and 1,280 deletions.
4 changes: 2 additions & 2 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ script:
docker run -it -v $(pwd):/root/repo nnvmc/base /bin/bash -c "cd /root/repo/build && make test";
elif [[ "$USE_GCOV" == "TRUE" ]]; then
docker run -it -v $(pwd):/root/repo nnvmc/base /bin/bash -c "cd /root/repo/build && make test";
docker run -e TRAVIS=$TRAVIS -e TRAVIS_JOB_ID=$TRAVIS_JOB_ID -it -v $(pwd):/root/repo nnvmc/base /bin/bash -c "pip install cpp-coveralls && cd /root/repo/build && coveralls --verbose -b ./ -r ../ -i include -i src -x .cpp -x .hpp --gcov-options '\-lp'";
docker run -e TRAVIS=$TRAVIS -e TRAVIS_JOB_ID=$TRAVIS_JOB_ID -it -v $(pwd):/root/repo nnvmc/base /bin/bash -c "pip install cpp-coveralls && cd /root/repo/build && coveralls -b ./ -r ../ -i include -i src -x .cpp -x .hpp --gcov-options '\-lp'";
else
docker run -it -v $(pwd):/root/repo nnvmc/base /bin/bash -c "cd /root/repo/test && ./run.sh";
fi;
Expand All @@ -83,7 +83,7 @@ script:
cd build && make test && cd ..;
elif [[ "$USE_GCOV" == "TRUE" ]]; then
cd build && make test;
sudo pip install cpp-coveralls && coveralls --verbose -b ./ -r ../ -i include -i src -x .cpp -x .hpp --gcov-options '\-lp' && cd .. ;
sudo pip install cpp-coveralls && coveralls -b ./ -r ../ -i include -i src -x .cpp -x .hpp --gcov-options '\-lp' && cd .. ;
else
cd test && ./run.sh && cd .. ;
fi;
Expand Down
38 changes: 20 additions & 18 deletions benchmark/bench_actfs_derivs/main.cpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
#include <iomanip>
#include <iostream>
#include <random>
#include <iomanip>

#include "ffnn/actf/ActivationFunctionManager.hpp"
#include "ffnn/io/PrintUtilities.hpp"
Expand All @@ -14,17 +14,17 @@ void run_single_benchmark(const string &label, const string &actf_id, const doub
const double time_scale = 1000000000.; //nanoseconds

result = sample_benchmark_actf_derivs(std_actf::provideActivationFunction(actf_id), xdata, neval, nruns, flag_d1, flag_d2, flag_d3, flag_fad);
cout << label << ":" << setw(max(1, 11-(int)label.length())) << setfill(' ') << " " << result.first/neval*time_scale << " +- " << result.second/neval*time_scale << " nanoseconds" << endl;
cout << label << ":" << setw(max(1, 11-static_cast<int>(label.length()))) << setfill(' ') << " " << result.first/neval*time_scale << " +- " << result.second/neval*time_scale << " nanoseconds" << endl;
}

int main (void) {
int main () {
const int neval = 100000;
const int nruns = 10;

const int nactfs = 8;
const string actf_ids[nactfs] = {"LGS", "GSS", "ID", "TANS", "SIN", "RELU", "SELU", "SRLU"};

double * xdata = new double[neval]; // 1d input data for actf bench
auto * xdata = new double[neval]; // 1d input data for actf bench

// generate some random input
random_device rdev;
Expand All @@ -33,24 +33,26 @@ int main (void) {
rgen = mt19937_64(rdev());
rgen.seed(18984687);
rd = uniform_real_distribution<double>(-sqrt(3.), sqrt(3.)); // uniform with variance 1
for (int i=0; i<neval; ++i) xdata[i] = rd(rgen);
for (int i=0; i<neval; ++i) { xdata[i] = rd(rgen);
}

// ACTF deriv benchmark
for (int iactf=0; iactf<nactfs; ++iactf) {
cout << "ACTF derivative benchmark with " << nruns << " runs of " << neval << " evaluations for " << actf_ids[iactf] << " activation function." << endl;
for (const auto & actf_id : actf_ids) {
cout << "ACTF derivative benchmark with " << nruns << " runs of " << neval << " evaluations for " << actf_id << " activation function." << endl;
cout << "===========================================================================================" << endl << endl;
for (bool flag_fad : { false, true }) {
if (flag_fad) cout << "Time per evaluation using fad function call:" << endl;
else cout << "Time per evaluation using individual function calls:" << endl;

run_single_benchmark("f", actf_ids[iactf], xdata, neval, nruns, false, false, false, flag_fad);
run_single_benchmark("f+d1", actf_ids[iactf], xdata, neval, nruns, true, false, false, flag_fad);
run_single_benchmark("f+d2", actf_ids[iactf], xdata, neval, nruns, false, true, false, flag_fad);
run_single_benchmark("f+d3", actf_ids[iactf], xdata, neval, nruns, false, false, true, flag_fad);
run_single_benchmark("f+d1+d2", actf_ids[iactf], xdata, neval, nruns, true, true, false, flag_fad);
run_single_benchmark("f+d1+d3", actf_ids[iactf], xdata, neval, nruns, true, false, true, flag_fad);
run_single_benchmark("f+d2+d3", actf_ids[iactf], xdata, neval, nruns, false, true, true, flag_fad);
run_single_benchmark("f+d1+d2+d3", actf_ids[iactf], xdata, neval, nruns, true, true, true, flag_fad);
if (flag_fad) { cout << "Time per evaluation using fad function call:" << endl;
} else { cout << "Time per evaluation using individual function calls:" << endl;
}

run_single_benchmark("f", actf_id, xdata, neval, nruns, false, false, false, flag_fad);
run_single_benchmark("f+d1", actf_id, xdata, neval, nruns, true, false, false, flag_fad);
run_single_benchmark("f+d2", actf_id, xdata, neval, nruns, false, true, false, flag_fad);
run_single_benchmark("f+d3", actf_id, xdata, neval, nruns, false, false, true, flag_fad);
run_single_benchmark("f+d1+d2", actf_id, xdata, neval, nruns, true, true, false, flag_fad);
run_single_benchmark("f+d1+d3", actf_id, xdata, neval, nruns, true, false, true, flag_fad);
run_single_benchmark("f+d2+d3", actf_id, xdata, neval, nruns, false, true, true, flag_fad);
run_single_benchmark("f+d1+d2+d3", actf_id, xdata, neval, nruns, true, true, true, flag_fad);

cout << endl;
}
Expand Down
17 changes: 9 additions & 8 deletions benchmark/bench_actfs_ffprop/main.cpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
#include <iomanip>
#include <iostream>
#include <random>
#include <iomanip>

#include "ffnn/actf/ActivationFunctionManager.hpp"
#include "ffnn/io/PrintUtilities.hpp"
Expand All @@ -15,10 +15,10 @@ void run_single_benchmark(const string &label, FeedForwardNeuralNetwork * const
const double time_scale = 1000000.; //microseconds

result = sample_benchmark_FFPropagate(ffnn, xdata, neval, nruns);
cout << label << ":" << setw(max(1, 20-(int)label.length())) << setfill(' ') << " " << result.first/neval*time_scale << " +- " << result.second/neval*time_scale << " microseconds" << endl;
cout << label << ":" << setw(max(1, 20-static_cast<int>(label.length()))) << setfill(' ') << " " << result.first/neval*time_scale << " +- " << result.second/neval*time_scale << " microseconds" << endl;
}

int main (void) {
int main () {
const int neval = 1000;
const int nruns = 5;

Expand All @@ -30,7 +30,7 @@ int main (void) {
const string actf_ids[nactfs] = {"LGS", "GSS", "ID", "TANS", "SIN", "RELU", "SELU", "SRLU"};

const int ndata = neval*xndim;
double * xdata = new double[ndata]; // xndim input data for propagate bench
auto * xdata = new double[ndata]; // xndim input data for propagate bench

// generate some random input
random_device rdev;
Expand All @@ -44,16 +44,17 @@ int main (void) {
}

// FFPropagate benchmark
for (int iactf=0; iactf<nactfs; ++iactf) {
for (const auto & actf_id : actf_ids) {
FeedForwardNeuralNetwork * ffnn = new FeedForwardNeuralNetwork(xndim+1, nhu[0], yndim+1);
for (int i=1; i<nhl; ++i) ffnn->pushHiddenLayer(nhu[i]);
for (int i=1; i<nhl; ++i) { ffnn->pushHiddenLayer(nhu[i]);
}
ffnn->connectFFNN();
ffnn->assignVariationalParameters();

//Set ACTFs for hidden units
for (int i=0; i<nhl; ++i) {
for (int j=1; j<nhu[i]; ++j) {
ffnn->getNNLayer(i)->getNNUnit(j-1)->setActivationFunction(std_actf::provideActivationFunction(actf_ids[iactf]));
ffnn->getNNLayer(i)->getNNUnit(j-1)->setActivationFunction(std_actf::provideActivationFunction(actf_id));
}
}

Expand All @@ -62,7 +63,7 @@ int main (void) {
ffnn->getNNLayer(nhl)->getNNUnit(j-1)->setActivationFunction(std_actf::provideActivationFunction("ID"));
}

cout << "FFPropagate benchmark with " << nruns << " runs of " << neval << " FF-Propagations for " << actf_ids[iactf] << " activation function." << endl;
cout << "FFPropagate benchmark with " << nruns << " runs of " << neval << " FF-Propagations for " << actf_id << " activation function." << endl;
cout << "=========================================================================================" << endl << endl;
cout << "NN structure looks like:" << endl << endl;
printFFNNStructure(ffnn, true, 0);
Expand Down
11 changes: 6 additions & 5 deletions benchmark/bench_nunits_ffprop/main.cpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
#include <iomanip>
#include <iostream>
#include <random>
#include <iomanip>

#include "ffnn/actf/ActivationFunctionManager.hpp"
#include "ffnn/io/PrintUtilities.hpp"
Expand All @@ -15,10 +15,10 @@ void run_single_benchmark(const string &label, FeedForwardNeuralNetwork * const
const double time_scale = 1000000.; //microseconds

result = sample_benchmark_FFPropagate(ffnn, xdata, neval, nruns);
cout << label << ":" << setw(max(1, 20-(int)label.length())) << setfill(' ') << " " << result.first/neval*time_scale << " +- " << result.second/neval*time_scale << " microseconds" << endl;
cout << label << ":" << setw(max(1, 20-static_cast<int>(label.length()))) << setfill(' ') << " " << result.first/neval*time_scale << " +- " << result.second/neval*time_scale << " microseconds" << endl;
}

int main (void) {
int main () {
const int neval[3] = {50000, 1000, 20};
const int nruns = 5;

Expand All @@ -31,7 +31,7 @@ int main (void) {
ndata[i] = neval[i]*xndim[i];
ndata_full += ndata[i];
}
double * xdata = new double[ndata_full]; // xndim input data for propagate bench
auto * xdata = new double[ndata_full]; // xndim input data for propagate bench

// generate some random input
random_device rdev;
Expand All @@ -48,7 +48,8 @@ int main (void) {
int xoffset = 0; // used to shift current xdata pointer
for (int inet=0; inet<3; ++inet) {
FeedForwardNeuralNetwork * ffnn = new FeedForwardNeuralNetwork(xndim[inet]+1, nhu1[inet]+1, yndim+1);
for (int i=1; i<nhl; ++i) ffnn->pushHiddenLayer(nhu2[inet]);
for (int i=1; i<nhl; ++i) { ffnn->pushHiddenLayer(nhu2[inet]);
}
ffnn->connectFFNN();
ffnn->assignVariationalParameters();

Expand Down
22 changes: 12 additions & 10 deletions benchmark/common/FFNNBenchmarks.hpp
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
#include <iostream>
#include <cmath>
#include <iostream>
#include <tuple>

#include "ffnn/net/FeedForwardNeuralNetwork.hpp"
#include "Timer.hpp"
#include "ffnn/net/FeedForwardNeuralNetwork.hpp"

double benchmark_FFPropagate(FeedForwardNeuralNetwork * const ffnn, const double * const xdata, const int neval) {
inline double benchmark_FFPropagate(FeedForwardNeuralNetwork * const ffnn, const double * const xdata, const int neval) {
Timer timer(1.);
const int ninput = ffnn->getNInput();

Expand All @@ -18,7 +18,7 @@ double benchmark_FFPropagate(FeedForwardNeuralNetwork * const ffnn, const double
return timer.elapsed();
}

std::pair<double, double> sample_benchmark_FFPropagate(FeedForwardNeuralNetwork * const ffnn, const double * const xdata, const int neval, const int nruns) {
inline std::pair<double, double> sample_benchmark_FFPropagate(FeedForwardNeuralNetwork * const ffnn, const double * const xdata, const int neval, const int nruns) {
double times[nruns];
double mean = 0., err = 0.;

Expand All @@ -27,15 +27,16 @@ std::pair<double, double> sample_benchmark_FFPropagate(FeedForwardNeuralNetwork
mean += times[i];
}
mean /= nruns;
for (int i=0; i<nruns; ++i) err += pow(times[i]-mean, 2);
for (int i=0; i<nruns; ++i) { err += pow(times[i]-mean, 2);
}
err /= (nruns-1)*nruns; // variance of the mean
err = sqrt(err); // standard error of the mean

const std::pair<double, double> result(mean, err);
return result;
}

double benchmark_actf_derivs(ActivationFunctionInterface * const actf, const double * const xdata, const int neval, const bool flag_d1 = true, const bool flag_d2 = true, const bool flag_d3 = true, const bool flag_fad = true) {
inline double benchmark_actf_derivs(ActivationFunctionInterface * const actf, const double * const xdata, const int neval, const bool flag_d1 = true, const bool flag_d2 = true, const bool flag_d3 = true, const bool flag_fad = true) {
Timer timer(1.);
double v=0., v1d=0., v2d=0., v3d=0.;

Expand All @@ -46,7 +47,7 @@ double benchmark_actf_derivs(ActivationFunctionInterface * const actf, const dou
}
return timer.elapsed();
}
else {

timer.reset();
for (int i=0; i<neval; ++i) {
v = actf->f(xdata[i]);
Expand All @@ -55,10 +56,10 @@ double benchmark_actf_derivs(ActivationFunctionInterface * const actf, const dou
v3d = flag_d3 ? actf->f3d(xdata[i]) : 0.0;
}
return timer.elapsed();
}

}

std::pair<double, double> sample_benchmark_actf_derivs(ActivationFunctionInterface * const actf, const double * const xdata, const int neval, const int nruns, const bool flag_d1 = true, const bool flag_d2 = true, const bool flag_d3 = true, const bool flag_fad = true) {
inline std::pair<double, double> sample_benchmark_actf_derivs(ActivationFunctionInterface * const actf, const double * const xdata, const int neval, const int nruns, const bool flag_d1 = true, const bool flag_d2 = true, const bool flag_d3 = true, const bool flag_fad = true) {
double times[nruns];
double mean = 0., err = 0.;

Expand All @@ -67,7 +68,8 @@ std::pair<double, double> sample_benchmark_actf_derivs(ActivationFunctionInterfa
mean += times[i];
}
mean /= nruns;
for (int i=0; i<nruns; ++i) err += pow(times[i]-mean, 2);
for (int i=0; i<nruns; ++i) { err += pow(times[i]-mean, 2);
}
err /= (nruns-1)*nruns; // variance of the mean
err = sqrt(err); // standard error of the mean

Expand Down
9 changes: 5 additions & 4 deletions benchmark/common/Timer.hpp
Original file line number Diff line number Diff line change
@@ -1,17 +1,18 @@
#include <iostream>
#include <chrono>
#include <iostream>

// Simple stopwatch built on std::chrono::high_resolution_clock.
// elapsed() reports the time since construction (or the last reset()),
// expressed in seconds and multiplied by the user-supplied scale factor
// (e.g. pass 1000000. to obtain microseconds).
class Timer
{
public:
    // scale: factor applied to the elapsed time in seconds (e.g. 1e9 -> ns).
    explicit Timer(const double scale) : _beg(_clock::now()), _scale(scale) {}

    // Restart the stopwatch from "now".
    void reset() { _beg = _clock::now(); }

    // Scaled time elapsed since construction or the last reset().
    double elapsed() const {
        using second = std::chrono::duration<double, std::ratio<1>>; // seconds as double
        return _scale * std::chrono::duration_cast<second>(_clock::now() - _beg).count();
    }

private:
    using _clock = std::chrono::high_resolution_clock;
    std::chrono::time_point<_clock> _beg; // start point of the current measurement
    const double _scale; // unit conversion factor applied to seconds
};
6 changes: 3 additions & 3 deletions examples/ex1/main.cpp
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
#include <iostream>
#include <cmath>
#include <fstream>
#include <iostream>

#include "ffnn/net/FeedForwardNeuralNetwork.hpp"
#include "ffnn/io/PrintUtilities.hpp"
#include "ffnn/net/FeedForwardNeuralNetwork.hpp"


int main() {
Expand All @@ -29,7 +29,7 @@ int main() {
cin.ignore();

// NON I/O CODE
FeedForwardNeuralNetwork * ffnn = new FeedForwardNeuralNetwork(n0,n1,n2);
auto * ffnn = new FeedForwardNeuralNetwork(n0,n1,n2);
//

cout << "Graphically it looks like this:" << endl;
Expand Down
27 changes: 17 additions & 10 deletions examples/ex10/main.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
#include <iostream>
#include <cmath>
#include <iostream>
#include <random>

#include "ffnn/train/NNTrainerGSL.hpp"
Expand Down Expand Up @@ -38,7 +38,7 @@ double gaussian_d2dx(const double x, const double a = 1., const double b = 0.) {



int main (void) {
int main () {
using namespace std;

int nhl, nfits = 1;
Expand All @@ -64,11 +64,14 @@ int main (void) {
cout << endl;

int nl = nhl + 2;
if (flag_fm) nl += 1;
if (flag_fm) { nl += 1;
}

cout << "We generate a FFANN with " << nl << " layers and 3, ";
if (flag_fm) cout << "2, ";
for (int i=0; i<nhl; ++i) cout << nhu[i] << ", ";
if (flag_fm) { cout << "2, ";
}
for (int i=0; i<nhl; ++i) { cout << nhu[i] << ", ";
}
cout << "2 units respectively" << endl;
cout << "========================================================================" << endl;
cout << endl;
Expand All @@ -83,19 +86,22 @@ int main (void) {
cout << "Please enter the the maximum tolerable fit residual. (0 to disable) ";
cin >> maxchi;
cout << "Please enter the ";
if (maxchi > 0) cout << "maximum ";
if (maxchi > 0) { cout << "maximum ";
}
cout << "number of fitting runs. (>0) ";
cin >> nfits;
cout << endl << endl;
cout << "Now we find the best fit ... " << endl;
if (!verbose) cout << "NOTE: You may increase the amount of displayed information by setting verbose to true in the head of main." << endl;
if (!verbose) { cout << "NOTE: You may increase the amount of displayed information by setting verbose to true in the head of main." << endl;
}
cout << endl;

// NON I/O CODE

// create FFNN
FeedForwardNeuralNetwork * ffnn = new FeedForwardNeuralNetwork(3, nhu[0], 2);
for (int i = 1; i<nhl; ++i) ffnn->pushHiddenLayer(nhu[i]);
for (int i = 1; i<nhl; ++i) { ffnn->pushHiddenLayer(nhu[i]);
}

if (flag_fm) {
ffnn->pushFeatureMapLayer(3);
Expand Down Expand Up @@ -123,7 +129,7 @@ int main (void) {
const int xndim = 2;
const int yndim = 1;

NNTrainingData tdata = {ndata, ntraining, nvalidation, xndim, yndim, NULL, NULL, NULL, NULL, NULL}; // we pass NULLs here, since we use tdata.allocate to allocate the data arrays. Alternatively, allocate manually and pass pointers here
NNTrainingData tdata = {ndata, ntraining, nvalidation, xndim, yndim, nullptr, nullptr, nullptr, nullptr, nullptr}; // we pass NULLs here, since we use tdata.allocate to allocate the data arrays. Alternatively, allocate manually and pass pointers here
NNTrainingConfig tconfig = {lambda_r, lambda_d1, lambda_d2, maxn_steps, maxn_novali};

// allocate data arrays
Expand Down Expand Up @@ -159,7 +165,8 @@ int main (void) {
+ gaussian(tdata.x[i][0]-tdata.x[i][1]) * gaussian(tdata.x[i][0]) * gaussian_d2dx(tdata.x[i][1]);
}
tdata.w[i][0] = 1.0; // our data have no error, so set all weights to 1
if (verbose) printf ("data: %i %g %g\n", i, tdata.x[i][0]-tdata.x[i][1], tdata.y[i][0]);
if (verbose) { printf ("data: %i %g %g\n", i, tdata.x[i][0]-tdata.x[i][1], tdata.y[i][0]);
}
}


Expand Down
4 changes: 2 additions & 2 deletions examples/ex2/main.cpp
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
#include <iostream>
#include <cmath>
#include <fstream>
#include <iostream>

#include "ffnn/net/FeedForwardNeuralNetwork.hpp"
#include "ffnn/io/PrintUtilities.hpp"
#include "ffnn/net/FeedForwardNeuralNetwork.hpp"


int main() {
Expand Down
Loading

0 comments on commit 3b540cd

Please sign in to comment.