#include <inference_engine.hpp>
◆ inference_engine()
pytorch::inference_engine::inference_engine (const int & device = 0,
                                             af::Backend backend = AF_BACKEND_CUDA,
                                             bool quiet = true)
inline
◆ ~inference_engine()
virtual pytorch::inference_engine::~inference_engine ()
inline virtual
◆ add_layer() [1/2]
void pytorch::inference_engine::add_layer (Layer * l)
inline
◆ add_layer() [2/2]
void pytorch::inference_engine::add_layer (std::vector< Layer * > l)
inline
◆ forward()
tensor pytorch::inference_engine::forward (const std::vector< tensor > & input)
inline
◆ get_layer_ptr()
Layer * pytorch::inference_engine::get_layer_ptr (const int & depth,
                                                  const int & width = 0)
inline
◆ device
const int pytorch::inference_engine::device
private
◆ layers
std::vector< std::vector< pytorch::Layer * > > pytorch::inference_engine::layers
private
The documentation for this class was generated from the following file: