Merge pull request #39 from DCM-UPB/templnet_features
Templnet features
Ithanil authored Nov 11, 2019
2 parents 856b3c3 + fa15af4 · commit 35475b5
Showing 7 changed files with 215 additions and 112 deletions.
6 changes: 3 additions & 3 deletions benchmark/bench_templ_ffprop/main.cpp
@@ -75,23 +75,23 @@ int main()
using L1Type_s = LayerConfig<nhu1[0], actf::Sigmoid>;
using L2Type_s = LayerConfig<nhu2[0], actf::Sigmoid>;
using L3Type_s = LayerConfig<yndim, actf::Sigmoid>;
- using NetType_s = TemplNet<RealT, dconf, xndim[0], L1Type_s, L2Type_s, L3Type_s>;
+ using NetType_s = TemplNet<RealT, dconf, xndim[0], xndim[0], L1Type_s, L2Type_s, L3Type_s>;
auto tnet_s_ptr = std::make_unique<NetType_s>();
auto &tnet_s = *tnet_s_ptr;

// Medium Net
using L1Type_m = LayerConfig<nhu1[1], actf::Sigmoid>;
using L2Type_m = LayerConfig<nhu2[1], actf::Sigmoid>;
using L3Type_m = LayerConfig<yndim, actf::Sigmoid>;
- using NetType_m = TemplNet<RealT, dconf, xndim[1], L1Type_m, L2Type_m, L3Type_m>;
+ using NetType_m = TemplNet<RealT, dconf, xndim[1], xndim[1], L1Type_m, L2Type_m, L3Type_m>;
auto tnet_m_ptr = std::make_unique<NetType_m>();
auto &tnet_m = *tnet_m_ptr;

// Large Net
using L1Type_l = LayerConfig<nhu1[2], actf::Sigmoid>;
using L2Type_l = LayerConfig<nhu2[2], actf::Sigmoid>;
using L3Type_l = LayerConfig<yndim, actf::Sigmoid>;
- using NetType_l = TemplNet<RealT, dconf, xndim[2], L1Type_l, L2Type_l, L3Type_l>;
+ using NetType_l = TemplNet<RealT, dconf, xndim[2], xndim[2], L1Type_l, L2Type_l, L3Type_l>;
auto tnet_l_ptr = std::make_unique<NetType_l>();
auto &tnet_l = *tnet_l_ptr;

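The second size argument threads the original input dimension (ORIG_NINPUT) through the net type separately from the first layer's input width; for these plain feed-forward benchmark nets the two coincide, so xndim[i] is simply passed twice. A minimal declaration sketch under that assumption (the widths and the dconf value are illustrative placeholders, not taken from the repository):

// illustrative only: a 4-input net, ORIG_NINPUT == NET_NINPUT == 4
using L1 = LayerConfig<12, actf::Sigmoid>; // hidden layer, 12 units
using L2 = LayerConfig<1, actf::Sigmoid>;  // output layer, 1 unit
// before this commit: TemplNet<double, dconf, 4, L1, L2>
// after it, the original input size is passed in addition:
using Net = TemplNet<double, dconf, 4, 4, L1, L2>;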
3 changes: 1 addition & 2 deletions benchmark/common/FFNNBenchmarks.hpp
@@ -27,8 +27,7 @@ inline double benchmark_TemplProp(TemplNet &tnet, const double xdata[], const in

timer.reset();
for (int i = 0; i < neval; ++i) {
- tnet.setInput(xdata + i*ninput, xdata + (i+1)*ninput);
- tnet.FFPropagate();
+ tnet.Propagate(xdata + i*ninput);
}

return timer.elapsed();
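The removed two-step pattern first copied the input range into the net and then ran the forward pass; the new Propagate() overload takes the input pointer directly, so the benchmark loop issues a single call per evaluation. Side by side (grounded in the hunk above; xdata, i, and ninput as in the benchmark):

// before: copy the input range, then propagate
tnet.setInput(xdata + i*ninput, xdata + (i+1)*ninput);
tnet.FFPropagate();
// after: one call, reading ninput values from the given pointer
tnet.Propagate(xdata + i*ninput);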
16 changes: 8 additions & 8 deletions include/qnets/templ/LayerPackTools.hpp
@@ -25,18 +25,18 @@ constexpr int nbeta_next() { return 0; }
template <class LConf1, class LConf2, class ... Rest> // LConf2 is "next"
constexpr int nbeta_next() { return (1 + LConf1::noutput)*LConf2::noutput; }

- template <typename ValueT, DerivConfig DCONF, int NET_NINPUT, int NET_NOUTPUT, int N_IN, class>
+ template <typename ValueT, DerivConfig DCONF, int ORIG_NINPUT, int NET_NINPUT, int NET_NOUTPUT, int N_IN, class>
struct LayerPackTuple_rec
{
using type = std::tuple<>;
};

template <typename ValueT, DerivConfig DCONF, int NET_NINPUT, int NET_NOUTPUT, int N_IN, class LConf, class ... LCONFS>
struct LayerPackTuple_rec<ValueT, DCONF, NET_NINPUT, NET_NOUTPUT, N_IN, std::tuple<LConf, LCONFS...>>
template <typename ValueT, DerivConfig DCONF, int ORIG_NINPUT, int NET_NINPUT, int NET_NOUTPUT, int N_IN, class LConf, class ... LCONFS>
struct LayerPackTuple_rec<ValueT, DCONF, ORIG_NINPUT, NET_NINPUT, NET_NOUTPUT, N_IN, std::tuple<LConf, LCONFS...>>
{
private:
- using layer = TemplLayer<ValueT, NET_NINPUT, NET_NOUTPUT, nbeta_next<LConf, LCONFS...>(), N_IN, LConf::noutput, typename LConf::ACTF_Type, DCONF>;
- using rest = typename LayerPackTuple_rec<ValueT, DCONF, NET_NINPUT, NET_NOUTPUT, layer::noutput, std::tuple<LCONFS...>>::type;
+ using layer = TemplLayer<ValueT, ORIG_NINPUT, NET_NINPUT, NET_NOUTPUT, nbeta_next<LConf, LCONFS...>(), N_IN, LConf::noutput, typename LConf::ACTF_Type, DCONF>;
+ using rest = typename LayerPackTuple_rec<ValueT, DCONF, ORIG_NINPUT, NET_NINPUT, NET_NOUTPUT, layer::noutput, std::tuple<LCONFS...>>::type;
public:
using type = decltype(std::tuple_cat(
std::declval<std::tuple<layer>>(),
@@ -48,13 +48,13 @@ struct LayerPackTuple_rec<ValueT, DCONF, NET_NINPUT, NET_NOUTPUT, N_IN, std::tup
//
// Helps to determine the full layer tuple type according to LayerConfig pack
//
- template <typename ValueT, DerivConfig DCONF, int NET_NINPUT, class LConf, class ... LCONFS>
+ template <typename ValueT, DerivConfig DCONF, int ORIG_NINPUT, int NET_NINPUT, class LConf, class ... LCONFS>
struct LayerPackTuple
{
private:
static constexpr int net_noutput = detail::net_nout<LConf, LCONFS...>();
- using layer = TemplLayer<ValueT, NET_NINPUT, net_noutput, detail::nbeta_next<LConf, LCONFS...>(), NET_NINPUT, LConf::noutput, typename LConf::ACTF_Type, DCONF>;
- using rest = typename detail::LayerPackTuple_rec<ValueT, DCONF, NET_NINPUT, net_noutput, layer::noutput, std::tuple<LCONFS...>>::type;
+ using layer = TemplLayer<ValueT, ORIG_NINPUT, NET_NINPUT, net_noutput, detail::nbeta_next<LConf, LCONFS...>(), NET_NINPUT, LConf::noutput, typename LConf::ACTF_Type, DCONF>;
+ using rest = typename detail::LayerPackTuple_rec<ValueT, DCONF, ORIG_NINPUT, NET_NINPUT, net_noutput, layer::noutput, std::tuple<LCONFS...>>::type;
public:
using type = decltype(std::tuple_cat(
std::declval<std::tuple<layer>>(),
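LayerPackTuple and LayerPackTuple_rec assemble the std::tuple of fully specialized layer types at compile time: each recursion step materializes one TemplLayer (now also carrying ORIG_NINPUT) and recurses with layer::noutput as the next N_IN. A self-contained sketch of the same tuple-building technique, using simplified stand-in types rather than the library's real classes:

#include <tuple>
#include <type_traits>
#include <utility>

// Stand-ins: each "layer" records only its input/output widths.
template <int N_IN, int N_OUT>
struct Layer { static constexpr int noutput = N_OUT; };

template <int N_OUT>
struct Conf { static constexpr int noutput = N_OUT; };

// Base case: an empty config pack yields an empty tuple.
template <int N_IN, class ... Confs>
struct PackTuple { using type = std::tuple<>; };

// Recursive case: materialize one Layer, then recurse with its output
// width as the next input width -- the LayerPackTuple_rec pattern above.
template <int N_IN, class C, class ... Rest>
struct PackTuple<N_IN, C, Rest...>
{
    using layer = Layer<N_IN, C::noutput>;
    using type = decltype(std::tuple_cat(
            std::declval<std::tuple<layer>>(),
            std::declval<typename PackTuple<layer::noutput, Rest...>::type>()));
};

// A 4 -> 8 -> 2 stack becomes std::tuple<Layer<4, 8>, Layer<8, 2>>:
static_assert(std::is_same<PackTuple<4, Conf<8>, Conf<2>>::type,
                           std::tuple<Layer<4, 8>, Layer<8, 2>>>::value, "");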
25 changes: 13 additions & 12 deletions include/qnets/templ/TemplLayer.hpp
@@ -29,23 +29,24 @@ struct LayerConfig

// The actual Layer class
//
- template <typename ValueT, int NET_NINPUT, int NET_NOUTPUT, int NBETA_NEXT, int N_IN, int N_OUT, class ACTFType, DerivConfig DCONF>
+ template <typename ValueT, int ORIG_NINPUT, int NET_NINPUT, int NET_NOUTPUT, int NBETA_NEXT, int N_IN, int N_OUT, class ACTFType, DerivConfig DCONF>
class TemplLayer: public LayerConfig<N_OUT, ACTFType>
{
public:
// N_IN dependent sizes
static constexpr int ninput = N_IN;
static constexpr int nbeta = (N_IN + 1)*N_OUT;
+ static constexpr int orig_nin = ORIG_NINPUT;
static constexpr int net_nin = NET_NINPUT;
static constexpr int net_nout = NET_NOUTPUT;

// Sizes which also depend on DCONF
static constexpr StaticDFlags<DCONF> dconf{};

static constexpr int nd2 = dconf.d2
- ? NET_NINPUT*N_OUT
+ ? ORIG_NINPUT*N_OUT
: 0; // number of forward-accumulated first/second order input derivative values
- static constexpr int nd2_prev = dconf.d2 ? NET_NINPUT*N_IN : 0; // the same number of previous layer
+ static constexpr int nd2_prev = dconf.d2 ? ORIG_NINPUT*N_IN : 0; // the same number for the previous layer

static_assert(NBETA_NEXT%(1 + N_OUT) == 0, ""); // -> BUG!
static constexpr int nout_next = NBETA_NEXT/(1 + N_OUT);
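The sizing switch matters whenever the stored derivatives are taken with respect to inputs that differ from this layer's direct inputs: the buffers now scale with the original input dimension. A concrete sizing example (numbers are illustrative only, not from the repository):

// ORIG_NINPUT = 4 original inputs, N_IN = 6, N_OUT = 8, d2 enabled:
// nd2      = ORIG_NINPUT * N_OUT = 4 * 8 = 32 values per derivative order
// nd2_prev = ORIG_NINPUT * N_IN  = 4 * 6 = 24 values held by the previous layer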
@@ -121,12 +122,12 @@ class TemplLayer: public LayerConfig<N_OUT, ACTFType>
for (int i = 0; i < N_OUT; ++i) {
for (int j = 0; j < N_IN; ++j) {
const ValueT bij = beta[1 + i*(N_IN + 1) + j];
- for (int k = 0; k < NET_NINPUT; ++k) {
- D1[i*NET_NINPUT + k] += bij*in_d1[j*NET_NINPUT + k];
- D2[i*NET_NINPUT + k] += bij*in_d2[j*NET_NINPUT + k];
+ for (int k = 0; k < ORIG_NINPUT; ++k) {
+ D1[i*ORIG_NINPUT + k] += bij*in_d1[j*ORIG_NINPUT + k];
+ D2[i*ORIG_NINPUT + k] += bij*in_d2[j*ORIG_NINPUT + k];
}
}
- for (int l = i*NET_NINPUT; l < (i + 1)*NET_NINPUT; ++l) {
+ for (int l = i*ORIG_NINPUT; l < (i + 1)*ORIG_NINPUT; ++l) {
D2[l] = _ad1[i]*D2[l] + _ad2[i]*D1[l]*D1[l];
D1[l] *= _ad1[i];
}
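Concretely, this loop forward-accumulates the first and second derivatives with respect to the ORIG_NINPUT original inputs. Writing f'_i and f''_i for the activation derivatives stored in _ad1 and _ad2, and b_ij for the weights, the update per unit i and original input k reads (a restatement of the code above, not an independent source):

D1_{ik} = \sum_j b_{ij} \, d1^{\mathrm{in}}_{jk}, \qquad D2_{ik} = \sum_j b_{ij} \, d2^{\mathrm{in}}_{jk}

d2_{ik} = f'_i \, D2_{ik} + f''_i \, D1_{ik}^2, \qquad d1_{ik} = f'_i \, D1_{ik}

which is the ordinary chain rule for y_i = f(\sum_j b_{ij} a_j + b_{i0}), applied coordinate-wise to each original input.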
@@ -136,7 +137,6 @@ class TemplLayer: public LayerConfig<N_OUT, ACTFType>
// forward-accumulate second order deriv when the inputs correspond (besides shift/scale) to the true network inputs
constexpr void _computeD2_Input()
{
- static_assert(N_IN == NET_NINPUT, "");
auto &D1 = *_d1_ptr;
auto &D2 = *_d2_ptr;
for (int i = 0; i < N_OUT; ++i) {
@@ -153,6 +153,7 @@ class TemplLayer: public LayerConfig<N_OUT, ACTFType>
{
// statically secure this call (i.e. using it on non-input layer will not compile)
static_assert(N_IN == NET_NINPUT, "[TemplLayer::ForwardInput] N_IN != NET_NINPUT");
+ static_assert(N_IN == ORIG_NINPUT, "[TemplLayer::ForwardInput] N_IN != ORIG_NINPUT");

dflags = dflags.AND(dconf); // AND static and dynamic conf
this->_computeOutput(input, dflags);
@@ -163,7 +164,7 @@ class TemplLayer: public LayerConfig<N_OUT, ACTFType>
}
}

- // continue forward pass from previous layer
+ // continue/start forward pass from previous layer / external source
constexpr void _forwardLayer(const ValueT input[], const ValueT in_d1[], const ValueT in_d2[], DynamicDFlags dflags)
{
dflags = dflags.AND(dconf); // AND static and dynamic conf
@@ -303,9 +304,9 @@ class TemplLayer: public LayerConfig<N_OUT, ACTFType>
// Pointer: No bounds checking


- // --- Propagation of input data (not layer)
+ // --- Propagation of original input data (not layer)

- constexpr void ForwardInput(const std::array<ValueT, NET_NINPUT> &input, DynamicDFlags dflags)
+ constexpr void ForwardInput(const std::array<ValueT, ORIG_NINPUT> &input, DynamicDFlags dflags)
{
_forwardInput(input.begin(), dflags);
}
@@ -316,7 +317,7 @@ class TemplLayer: public LayerConfig<N_OUT, ACTFType>
}


- // --- Forward Propagation of layer data
+ // --- Forward Propagation of layer data or external source

constexpr void ForwardLayer(const std::array<ValueT, N_IN> &input, const std::array<ValueT, nd2_prev> &in_d1, const std::array<ValueT, nd2_prev> &in_d2, DynamicDFlags dflags)
{
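Taken together, the TemplLayer changes separate the two forward entry points: ForwardInput is statically restricted to a true input layer (N_IN == NET_NINPUT == ORIG_NINPUT) and consumes the ORIG_NINPUT-sized original input, while ForwardLayer may now also start a pass from an external source whose derivative buffers are sized against the original inputs (nd2_prev = ORIG_NINPUT*N_IN). A hypothetical calling sketch under those signatures (the layer objects and the dflags value are illustrative, not repository test code):

// input layer: compiles only if N_IN == NET_NINPUT == ORIG_NINPUT
std::array<double, InLayer::orig_nin> x{}; // original network input
in_layer.ForwardInput(x, dflags);

// a layer fed from an external source: activations plus d1/d2 buffers
// taken with respect to the ORIG_NINPUT original inputs
std::array<double, MidLayer::ninput> act{};
std::array<double, MidLayer::nd2_prev> act_d1{}, act_d2{};
mid_layer.ForwardLayer(act, act_d1, act_d2, dflags);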
(The diffs of the remaining 3 changed files did not load and are not shown.)
