#include <Normalization.hpp>
BatchNorm2d (const tensor &gamma, const tensor &beta, const tensor &running_mean, const tensor &running_var, const float &epsilon=1e-5)
    Constructs a BatchNorm2d object.

BatchNorm2d (const std::string &gamma_filename="", const std::vector< int > &gamma_dims={}, const std::string &beta_filename="", const std::vector< int > &beta_dims={}, const std::string &running_mean_filename="", const std::vector< int > &running_mean_dims={}, const std::string &running_var_filename="", const std::vector< int > &running_var_dims={}, const float &epsilon=1e-5, const std::string &python_home="../scripts")
    Constructs a BatchNorm2d object and loads the requisite tensors in from filenames and sizes.

void add_gamma (const std::string &gamma_filename="", const std::vector< int > &gamma_dims={})
    Adds gamma to the layer if the filename wasn't passed to the constructor.

void add_beta (const std::string &beta_filename="", const std::vector< int > &beta_dims={})
    Adds beta if it wasn't added by the constructor.

void add_running_mean (const std::string &running_mean_filename="", const std::vector< int > &running_mean_dims={})
    Adds running_mean if it wasn't added by the constructor.

void add_running_var (const std::string &running_var_filename="", const std::vector< int > &running_var_dims={})
    Adds running_var if it wasn't added by the constructor.

std::vector< tensor > forward (const std::vector< tensor > &input)
    Applies the forward pass of batch normalization.

std::vector< tensor > operator() (const std::vector< tensor > &input)
    Applies the forward pass of batch normalization.
◆ BatchNorm2d() [1/2]
pytorch::BatchNorm2d::BatchNorm2d (const tensor &gamma,
                                   const tensor &beta,
                                   const tensor &running_mean,
                                   const tensor &running_var,
                                   const float &epsilon = 1e-5)  [inline]
Constructs a BatchNorm2d object.
Parameters
    gamma          The multiplier for the affine transform. Saved as 'bn.weight' by pytorch.
    beta           The bias for the affine transform. Saved as 'bn.bias' by pytorch.
    running_mean   The running mean for the batchnorm operation.
    running_var    The running variance for the batchnorm operation.
    epsilon        A factor in the denominator of the transform that adds numerical stability.
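For reference, these parameters feed the standard per-channel inference-time batch normalization transform (a sketch; the running statistics are fixed rather than recomputed from the batch):

    y_{n,k,h,w} = \gamma_k \cdot \frac{x_{n,k,h,w} - \mu_k}{\sqrt{\sigma_k^2 + \epsilon}} + \beta_k

where \mu_k and \sigma_k^2 are running_mean and running_var for channel k, and epsilon keeps the denominator away from zero.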
◆ BatchNorm2d() [2/2]
pytorch::BatchNorm2d::BatchNorm2d (const std::string &gamma_filename = "",
                                   const std::vector< int > &gamma_dims = {},
                                   const std::string &beta_filename = "",
                                   const std::vector< int > &beta_dims = {},
                                   const std::string &running_mean_filename = "",
                                   const std::vector< int > &running_mean_dims = {},
                                   const std::string &running_var_filename = "",
                                   const std::vector< int > &running_var_dims = {},
                                   const float &epsilon = 1e-5,
                                   const std::string &python_home = "../scripts")  [inline]
Constructs a BatchNorm2d object and loads the requisite tensors in from filenames and sizes.
Parameters
    gamma_filename         The file where gamma can be found. Will be loaded with numpy.load(filename).
    gamma_dims             The dimensions of gamma in pytorch convention (n, k, h, w), usually (1, k, 1, 1).
    beta_filename          The file where beta can be found. Will be loaded with numpy.load(filename).
    beta_dims              The dimensions of beta in pytorch convention (n, k, h, w), usually (1, k, 1, 1).
    running_mean_filename  The file where running_mean can be found. Will be loaded with numpy.load(filename).
    running_mean_dims      The dimensions of running_mean in pytorch convention (n, k, h, w), usually (1, k, 1, 1).
    running_var_filename   The file where running_var can be found. Will be loaded with numpy.load(filename).
    running_var_dims       The dimensions of running_var in pytorch convention (n, k, h, w), usually (1, k, 1, 1).
    epsilon                A float for numerical stability, 1e-5 by default.
    python_home            Where the utility scripts live; holds the loading script needed to load the tensors.
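A minimal construction sketch, assuming the tensors were exported from a pytorch model as .npy files; the filenames and the channel count (64) below are hypothetical:

    #include <Normalization.hpp>

    // Each tensor is loaded via numpy.load by the loading script found under python_home.
    // Dims follow the pytorch convention (n, k, h, w), here (1, 64, 1, 1) for 64 channels.
    pytorch::BatchNorm2d bn("save/bn_weight.npy",       {1, 64, 1, 1},   // gamma
                            "save/bn_bias.npy",         {1, 64, 1, 1},   // beta
                            "save/bn_running_mean.npy", {1, 64, 1, 1},   // running_mean
                            "save/bn_running_var.npy",  {1, 64, 1, 1},   // running_var
                            1e-5f, "../scripts");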
◆ add_beta()
void pytorch::BatchNorm2d::add_beta (const std::string &beta_filename = "", const std::vector< int > &beta_dims = {})  [inline]
Adds beta if it wasn't added by the constructor.
Parameters
    beta_filename  The file where beta can be found. Will be loaded with numpy.load(filename).
    beta_dims      The dimensions of beta in pytorch convention (n, k, h, w), usually (1, k, 1, 1).
◆ add_gamma()
void pytorch::BatchNorm2d::add_gamma (const std::string &gamma_filename = "", const std::vector< int > &gamma_dims = {})  [inline]
Adds gamma to the layer if the name wasn't passed to the constructor.
Parameters
    gamma_filename  The file where gamma can be found. Will be loaded with numpy.load(filename).
    gamma_dims      The dimensions of gamma in pytorch convention (n, k, h, w), usually (1, k, 1, 1).
◆ add_running_mean()
void pytorch::BatchNorm2d::add_running_mean (const std::string &running_mean_filename = "", const std::vector< int > &running_mean_dims = {})  [inline]
Adds running_mean if it wasn't added by the constructor.
Parameters
    running_mean_filename  The file where running_mean can be found. Will be loaded with numpy.load(filename).
    running_mean_dims      The dimensions of running_mean in pytorch convention (n, k, h, w), usually (1, k, 1, 1).
◆ add_running_var()
void pytorch::BatchNorm2d::add_running_var (const std::string &running_var_filename = "", const std::vector< int > &running_var_dims = {})  [inline]
Adds running_var if it wasn't added by the constructor.
Parameters
    running_var_filename  The file where running_var can be found. Will be loaded with numpy.load(filename).
    running_var_dims      The dimensions of running_var in pytorch convention (n, k, h, w), usually (1, k, 1, 1).
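Taken together, the add_* setters support deferred loading: since every argument of the second constructor is defaulted, the layer can be built empty and each tensor attached afterwards. A sketch with hypothetical filenames and a hypothetical channel count of 64:

    #include <Normalization.hpp>

    pytorch::BatchNorm2d bn;  // nothing loaded yet; all constructor arguments have defaults

    // Attach each tensor later, e.g. once the export paths are known.
    bn.add_gamma("save/bn_weight.npy", {1, 64, 1, 1});
    bn.add_beta("save/bn_bias.npy", {1, 64, 1, 1});
    bn.add_running_mean("save/bn_running_mean.npy", {1, 64, 1, 1});
    bn.add_running_var("save/bn_running_var.npy", {1, 64, 1, 1});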
◆ forward()
std::vector<tensor> pytorch::BatchNorm2d::forward (const std::vector< tensor > &input)  [inline, virtual]
Applies the forward pass of batch normalization.
Parameters
    input  The input data to be normalized.

Returns
    The normalized data; the size is unchanged.
Implements pytorch::Layer.
◆ operator()()
std::vector<tensor> pytorch::BatchNorm2d::operator() (const std::vector< tensor > &input)  [inline, virtual]
Applies the forward pass of batch normalization.
Parameters
    input  The input data to be normalized.

Returns
    The normalized data; the size is unchanged.
Implements pytorch::Layer.
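Both entry points run the same normalization, so either spelling works once the layer's tensors are loaded. A sketch (the input vector is assumed to come from an upstream layer, since tensor construction is not covered on this page):

    // 'features' is a hypothetical std::vector<tensor> produced by a preceding layer.
    std::vector<tensor> normed = bn.forward(features);  // explicit call
    std::vector<tensor> same   = bn(features);          // operator() is equivalent
    // The output tensors keep the shape of the input.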
◆ beta
tensor pytorch::BatchNorm2d::beta  [private]

◆ epsilon
float pytorch::BatchNorm2d::epsilon  [private]

◆ gamma
tensor pytorch::BatchNorm2d::gamma  [private]

◆ running_mean
tensor pytorch::BatchNorm2d::running_mean  [private]

◆ running_var
tensor pytorch::BatchNorm2d::running_var  [private]
◆ utils
The documentation for this class was generated from the following file:
    Normalization.hpp