Collective Variables Module - Developer Documentation
colvar_neuralnetworkcompute.h
1// -*- c++ -*-
2
3// This file is part of the Collective Variables module (Colvars).
4// The original version of Colvars and its updates are located at:
5// https://github.com/Colvars/colvars
6// Please update all Colvars source files before making any changes.
7// If you wish to distribute your changes, please submit them to the
8// Colvars repository at GitHub.
9
10#ifndef NEURALNETWORKCOMPUTE_H
11#define NEURALNETWORKCOMPUTE_H
12
13#include <vector>
14#include <functional>
15#include <string>
16#include <cmath>
17#include <memory>
18#include <map>
19
20#ifdef LEPTON
21#include "Lepton.h"
22#endif
23
24namespace neuralnetworkCV {
26extern std::map<std::string, std::pair<std::function<double(double)>, std::function<double(double)>>> activation_function_map;
27
28#ifdef LEPTON
29// allow defining a custom activation function
30class customActivationFunction {
31public:
33 customActivationFunction();
35 customActivationFunction(const std::string& expression_string);
37 customActivationFunction(const customActivationFunction& source);
39 customActivationFunction& operator=(const customActivationFunction& source);
41 void setExpression(const std::string& expression_string);
43 std::string getExpression() const;
45 double evaluate(double x) const;
47 double derivative(double x) const;
48private:
49 std::string expression;
50 std::unique_ptr<Lepton::CompiledExpression> value_evaluator;
51 std::unique_ptr<Lepton::CompiledExpression> gradient_evaluator;
52 double* input_reference;
53 double* derivative_reference;
54};
55#endif
56
57class denseLayer {
58private:
59 size_t m_input_size;
60 size_t m_output_size;
61 std::function<double(double)> m_activation_function;
62 std::function<double(double)> m_activation_function_derivative;
63#ifdef LEPTON
64 bool m_use_custom_activation;
65 customActivationFunction m_custom_activation_function;
66#else
67 static const bool m_use_custom_activation = false;
68#endif
70 std::vector<std::vector<double>> m_weights;
72 std::vector<double> m_biases;
73public:
75 denseLayer() {}
81 denseLayer(const std::string& weights_file, const std::string& biases_file, const std::function<double(double)>& f, const std::function<double(double)>& df);
82#ifdef LEPTON
87 denseLayer(const std::string& weights_file, const std::string& biases_file, const std::string& custom_activation_expression);
88#endif
90 void readFromFile(const std::string& weights_file, const std::string& biases_file);
92 void setActivationFunction(const std::function<double(double)>& f, const std::function<double(double)>& df);
94 void compute(const std::vector<double>& input, std::vector<double>& output) const;
96 double computeGradientElement(const std::vector<double>& input, const size_t i, const size_t j) const;
98 void computeGradient(const std::vector<double>& input, std::vector<std::vector<double>>& output_grad) const;
100 size_t getInputSize() const {
101 return m_input_size;
102 }
104 size_t getOutputSize() const {
105 return m_output_size;
106 }
108 double getWeight(size_t i, size_t j) const {
109 return m_weights[i][j];
110 }
111 double getBias(size_t i) const {
112 return m_biases[i];
113 }
114 ~denseLayer() {}
115};
116
117class neuralNetworkCompute {
118private:
119 std::vector<denseLayer> m_dense_layers;
120 std::vector<double> m_input;
122 std::vector<std::vector<double>> m_layers_output;
123 std::vector<std::vector<std::vector<double>>> m_grads_tmp;
124 std::vector<std::vector<double>> m_chained_grad;
125private:
127 static std::vector<std::vector<double>> multiply_matrix(const std::vector<std::vector<double>>& A, const std::vector<std::vector<double>>& B);
128public:
129 neuralNetworkCompute(): m_dense_layers(0), m_layers_output(0) {}
130 neuralNetworkCompute(const std::vector<denseLayer>& dense_layers);
131 bool addDenseLayer(const denseLayer& layer);
132 // for faster computation
133 const std::vector<double>& input() const {return m_input;}
134 std::vector<double>& input() {return m_input;}
136 void compute();
137 double getOutput(const size_t i) const {return m_layers_output.back()[i];}
138 double getGradient(const size_t i, const size_t j) const {return m_chained_grad[i][j];}
140 const denseLayer& getLayer(const size_t i) const {return m_dense_layers[i];}
142 size_t getNumberOfLayers() const {return m_dense_layers.size();}
143};
144
145}
146#endif
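The member documentation below describes denseLayer as a fully connected layer: m_weights[i][j] multiplies the j-th input, m_biases[i] is added to the i-th output node, and the activation function is applied element-wise. The following is a minimal, self-contained sketch of that forward pass; the name dense_forward and the tanh activation are illustrative assumptions, not the actual implementation of denseLayer::compute() (see colvar_neuralnetworkcompute.cpp:190).

// Sketch: forward pass of one dense layer, as documented for denseLayer::compute().
// output[i] = f( sum_j weights[i][j] * input[j] + biases[i] )
// dense_forward and the tanh activation are illustrative only.
#include <cmath>
#include <cstdio>
#include <functional>
#include <vector>

std::vector<double> dense_forward(const std::vector<std::vector<double>>& weights,
                                  const std::vector<double>& biases,
                                  const std::function<double(double)>& f,
                                  const std::vector<double>& input) {
    std::vector<double> output(weights.size(), 0.0);
    for (size_t i = 0; i < weights.size(); ++i) {
        double z = biases[i];
        for (size_t j = 0; j < input.size(); ++j) {
            z += weights[i][j] * input[j];   // weights[i][j]: i-th output, j-th input
        }
        output[i] = f(z);                    // element-wise activation
    }
    return output;
}

int main() {
    // toy 2-input, 2-output layer with tanh activation
    const std::vector<std::vector<double>> w = {{0.5, -0.25}, {1.0, 0.75}};
    const std::vector<double> b = {0.1, -0.2};
    const std::vector<double> x = {1.0, 2.0};
    const std::vector<double> y = dense_forward(w, b, [](double v) { return std::tanh(v); }, x);
    std::printf("y = [%g, %g]\n", y[0], y[1]);
    return 0;
}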
neuralnetworkCV::denseLayer
Definition: colvar_neuralnetworkcompute.h:57
double getWeight(size_t i, size_t j) const
getter for weights and biases
Definition: colvar_neuralnetworkcompute.h:108
void readFromFile(const std::string &weights_file, const std::string &biases_file)
read data from file
Definition: colvar_neuralnetworkcompute.cpp:134
void computeGradient(const std::vector< double > &input, std::vector< std::vector< double > > &output_grad) const
output_grad[i][j] is the gradient of the i-th output with respect to the j-th input
Definition: colvar_neuralnetworkcompute.cpp:228
double computeGradientElement(const std::vector< double > &input, const size_t i, const size_t j) const
compute the gradient of the i-th output with respect to the j-th input
Definition: colvar_neuralnetworkcompute.cpp:209
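As a hedged illustration of the quantity this member returns (not the Colvars code itself): for a dense layer with element-wise activation f, the gradient of the i-th output with respect to the j-th input is f'(z_i) * weights[i][j], where z_i is the pre-activation sum. The helper name dense_gradient_element is an assumption for the sketch.

// d output[i] / d input[j] = f'(z_i) * weights[i][j],
// with z_i = sum_k weights[i][k] * input[k] + biases[i]
#include <functional>
#include <vector>

double dense_gradient_element(const std::vector<std::vector<double>>& weights,
                              const std::vector<double>& biases,
                              const std::function<double(double)>& df,  // derivative of the activation
                              const std::vector<double>& input,
                              size_t i, size_t j) {
    double z = biases[i];
    for (size_t k = 0; k < input.size(); ++k) {
        z += weights[i][k] * input[k];
    }
    return df(z) * weights[i][j];
}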
denseLayer()
empty constructor
Definition: colvar_neuralnetworkcompute.h:75
size_t getOutputSize() const
get the output size
Definition: colvar_neuralnetworkcompute.h:104
void compute(const std::vector< double > &input, std::vector< double > &output) const
compute the value of this layer
Definition: colvar_neuralnetworkcompute.cpp:190
std::vector< double > m_biases
bias of each node
Definition: colvar_neuralnetworkcompute.h:72
size_t getInputSize() const
get the input size
Definition: colvar_neuralnetworkcompute.h:100
void setActivationFunction(const std::function< double(double)> &f, const std::function< double(double)> &df)
setup activation function
Definition: colvar_neuralnetworkcompute.cpp:185
std::vector< std::vector< double > > m_weights
weights[i][j] is the weight connecting the i-th output to the j-th input
Definition: colvar_neuralnetworkcompute.h:70
neuralnetworkCV::neuralNetworkCompute
Definition: colvar_neuralnetworkcompute.h:117
std::vector< std::vector< double > > m_layers_output
temporary output for each layer, useful to speed up the gradients' calculation
Definition: colvar_neuralnetworkcompute.h:122
static std::vector< std::vector< double > > multiply_matrix(const std::vector< std::vector< double > > &A, const std::vector< std::vector< double > > &B)
helper function: multiply two matrices constructed from 2D vectors
Definition: colvar_neuralnetworkcompute.cpp:265
size_t getNumberOfLayers() const
get the number of layers
Definition: colvar_neuralnetworkcompute.h:142
void compute()
compute the values and the gradients of all output nodes
Definition: colvar_neuralnetworkcompute.cpp:286
const denseLayer & getLayer(const size_t i) const
get a specified layer
Definition: colvar_neuralnetworkcompute.h:140
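To relate multiply_matrix(), m_layers_output and getGradient(i, j): the gradient of the network output with respect to the network input is the chain-rule product of the per-layer Jacobians, each evaluated at the cached output of the preceding layer. The sketch below uses assumed names (matmul, chain_jacobians); it mirrors the role of these members but is not the Colvars implementation of neuralNetworkCompute::compute().

#include <vector>

using matrix = std::vector<std::vector<double>>;

// Plain row-by-column product of two matrices stored as 2D vectors
// (the role played by multiply_matrix()).
matrix matmul(const matrix& A, const matrix& B) {
    const size_t n = A.size(), k = B.size(), m = B.front().size();
    matrix C(n, std::vector<double>(m, 0.0));
    for (size_t i = 0; i < n; ++i)
        for (size_t p = 0; p < k; ++p)
            for (size_t j = 0; j < m; ++j)
                C[i][j] += A[i][p] * B[p][j];
    return C;
}

// Chain rule: with J_l[i][j] = gradient of layer l's i-th output with respect to
// its j-th input (see denseLayer::computeGradient), the network gradient is
// J_last * ... * J_2 * J_1, which is what getGradient(i, j) exposes.
matrix chain_jacobians(const std::vector<matrix>& layer_jacobians) {
    matrix grad = layer_jacobians.back();
    for (size_t l = layer_jacobians.size() - 1; l-- > 0; ) {
        grad = matmul(grad, layer_jacobians[l]);
    }
    return grad;
}

Caching every layer's output (m_layers_output) lets each Jacobian be evaluated without redoing the forward pass, which is why compute() fills both the values and the gradients of all output nodes in a single call.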