#ifndef _HPP_LEARN
#define _HPP_LEARN

#include "simelement.hpp"
#include "connection.hpp"
#include "libcsim.hpp"
#include "layer.hpp"

class learning : public SimElement {
protected:
    layer *TargetLayer, *SourceLayer;
    int ns, nt;               // source and target layer dimensions
    int maxN_pre;
    int M;                    // number of postsynaptic connections per presynaptic neuron
    connection *con;
    float maxWeight, minWeight;
    int Dmax;
    int maximumDelay;
    short **delays_length;
    short ***delays;
    int **post;
    int **I_pre, **D_pre;
    int *N_pre;
    int **m_pre;
    pfloat **s_pre, **sd_pre;
public:
    learning(connection*, float _maxWeight=1);
    ~learning();
    virtual int proceede(int);
    virtual int prepare(int =0);
    virtual void SetMinWeight(float value);
    virtual void SetMaxWeight(float value);
    float **sd, **s;
};

class levylearning : public learning {
protected:
    float **LTP1, **LTP2;
    float TauA, TauB, decA, decB; // decay and rise time constants for the presynaptic potential
    float LearnRate;
    // float DeLearnFac;
    float LtpInc;
public:
    levylearning(connection*, float =1, float =0.1, float =150, float =1.785, float _LtpInc=1);
    ~levylearning();
    virtual int proceede(int);
    virtual int prepare(int =0);
};

class izhlearning : public learning {
protected:
    float **LTP;
    float *LTD;
    float dec_pre, dec_post; // decay of the pre- and postsynaptic learning potentials
public:
    izhlearning(connection* con, float _maxWeight=1);
    ~izhlearning();
    virtual int proceede(int =0);
    virtual int prepare(int =0);
};

class LearnRossum1 : public learning {
protected:
    float **SourceLearnPot;
    float *TargetLearnPot;
    float dec_pre, dec_post; // decay of the pre- and postsynaptic learning potentials
    float Cp, Cd, SynNoiseSigma;
public:
    LearnRossum1(connection* con, float _Cp=0.1, float _Cd=0.003, float _SynNoiseSigma=0.015, float _maxWeight=1);
    ~LearnRossum1();
    virtual int proceede(int =0);
    virtual int prepare(int =0);
};

class LearnAntiRossum1 : public LearnRossum1 {
public:
    LearnAntiRossum1(connection* con, float _Cp=0.1, float _Cd=0.003, float _SynNoiseSigma=0.015, float _maxWeight=1);
    virtual int proceede(int =0);
};

class LearnHebbLP2 : public learning {
protected:
    float **LTP;
    float LtpDecFac;
    float LtpInc;
    bool Accumulate;
    float LearnSpeed;
    float BaseLine;
public:
    LearnHebbLP2(connection* con, float _MaxWeight=1, float TauDec=20, float BaseLine=0.1, float _LearnSpeed=0.001, bool _Accumulate=true);
    ~LearnHebbLP2();
    virtual int proceede(int =0);
    virtual int prepare(int =0);
    virtual int WriteSimInfo(fstream &fw);
};

class LearnHebbLP2_norm : public learning {
protected:
    float **LTP;
    float LtpDecFac;
    float LtpInc;
    float LearnSpeed;
    float BaseLine;
    bool Normalize;
    float WeightSum;
public:
    LearnHebbLP2_norm(connection* con, float _MaxWeight=1, float TauDec=20, float BaseLine=0.1, float _LearnSpeed=0.001, float _WeightSum=0);
    ~LearnHebbLP2_norm();
    virtual int proceede(int =0);
    virtual int prepare(int =0);
    int NormalizeWeights();
};

class LearnPrePost : public learning {
protected:
    float **LPpre;           // presynaptic learning potentials
    float *LPpost;           // postsynaptic learning potentials
    float dec_pre, dec_post; // decay of the pre- and postsynaptic learning potentials
public:
    LearnPrePost(connection* con, float _maxWeight=1, float TauLearnPre=30, float TauLearnPost=30);
    ~LearnPrePost();
    virtual int proceede(int =0);
    virtual int prepare(int =0);
};

class LearnFBInh : public LearnPrePost {
protected:
    float Cpre, Cdep;
public:
    LearnFBInh(connection* con, float _maxWeight=1, float TauLearnPre=30, float TauLearnPost=30, float _Cpre=1, float _Cdep=0.03);
    virtual int proceede(int =0);
};
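// Illustrative sketch (not part of the original header): the per-step trace
// update that the LearnPrePost family builds on. Each learning potential
// decays exponentially by a factor exp(-dt/Tau) per time step -- this is what
// the precomputed dec_pre/dec_post members above correspond to -- and is
// incremented whenever the associated neuron fires. UpdateTraceSketch is a
// hypothetical name used only for illustration.
#include <cmath>

inline void UpdateTraceSketch(float &Trace, bool Spiked, float TauMs, float DtMs = 1.0f)
{
    Trace *= std::exp(-DtMs / TauMs); // passive exponential decay toward zero
    if (Spiked)
        Trace += 1.0f;                // increment on a spike
}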
class LearnFWInh : public LearnPrePost {
protected:
    float Cpre, Cdep;
public:
    LearnFWInh(connection* con, float _maxWeight=1, float TauLearnPre=30, float TauLearnPost=30, float _Cpre=1, float _Cdep=0.03);
    virtual int proceede(int =0);
};

// Like LearnHebbLP2, but with a stronger weight change at high postsynaptic
// rates (i.e. the learning rate is multiplied by the postsynaptic spike
// trace), which is why this class is derived from LearnPrePost.
class LearnHebbLP3 : public LearnPrePost {
protected:
    float LearnRate, BaseLine;
    float LtpInc;
    bool Accumulate;
public:
    LearnHebbLP3(connection* con, float _maxWeight=1, float TauLearnPre=30, float TauLearnPost=30, float _BaseLine=0.2, float _LearnRate=0.0001, bool _Accumulate=false);
    virtual int proceede(int =0);
};

class LearnAntiHebb : public LearnPrePost {
protected:
    float Cpre, Cdep;
public:
    LearnAntiHebb(connection* con, float _maxWeight=1, float TauLearnPre=30, float TauLearnPost=30, float _Cpre=1, float _Cdep=0.03);
    virtual int proceede(int =0);
};

// Learning rule according to Bi & Poo 1998, implemented as in Shon, Rao,
// Sejnowski 2004: alpha functions are used for the positive and negative
// parts of the learning window. The alpha functions are evaluated in the
// constructor and stored in lookup tables. Spike times are saved for every
// neuron; every pre- and postsynaptic spike is a learning event.
class LearnBiPoo : public learning {
protected:
    int **LastSpikesPre;       // time of the last spike at each presynaptic site
    int *LastSpikesPost;       // time of the last spike of each postsynaptic neuron
    float *PreLearnWindowLut;  // lookup table for the presynaptic learning window
    float *PostLearnWindowLut; // lookup table for the postsynaptic learning window
    float LearnRate, TauPeak1, TauPeak2, Amp1, Amp2;
    int PreLutN, PostLutN;
public:
    LearnBiPoo(connection* con, float _maxWeight=1, float LearnRate=0.001, float TauPeak1=10, float TauPeak2=10, float Amp1=-1.25, float Amp2=1);
    ~LearnBiPoo();
    virtual int proceede(int TotalTime);
    virtual int prepare(int step);
};

// Learning rule according to Froemke & Dan 2002. Similar to Bi & Poo, but
// additionally a spike efficacy (0 <= Eff <= 1) is calculated for each spike:
// after each spike Eff is set to zero and then relaxes back to 1
// exponentially (TauPre = 28 ms, TauPost = 88 ms).
// ERROR: not correctly implemented, because only nearest-neighbor
// interactions are used, whereas Froemke & Dan did not limit the interactions
// to nearest neighbors; this should be implemented using decaying potentials.
class LearnFroemkeDan : public learning {
protected:
    int **LastSpikesPre;       // time of the last spike at each presynaptic site
    int *LastSpikesPost;       // time of the last spike of each postsynaptic neuron
    int **SecondLastSpikesPre; // time of the second-to-last spike at each presynaptic site
    int *SecondLastSpikesPost; // time of the second-to-last spike of each postsynaptic neuron
    float *PreLearnWindowLut;  // lookup table for the presynaptic learning window
    float *PostLearnWindowLut; // lookup table for the postsynaptic learning window
    int PreLutN, PostLutN;
    float LearnRate, TauPeak1, TauPeak2, Amp1, Amp2;
    float *PreEfficacyLut;     // lookup table for the presynaptic spike efficacy
    float *PostEfficacyLut;    // lookup table for the postsynaptic spike efficacy
    float TauPreEff, TauPostEff;
    int PreEffLutN, PostEffLutN;
public:
    LearnFroemkeDan(connection* con, float _maxWeight=1, float LearnRate=0.001, float PreEffTau=28, float PostEffTau=88, float TauPeak1=35, float TauPeak2=15, float Amp1=-.5, float Amp2=1);
    ~LearnFroemkeDan();
    virtual int proceede(int TotalTime);
    virtual int prepare(int step);
};
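// Hedged sketch of how the alpha-function learning windows used by LearnBiPoo
// could be tabulated (the actual code lives in the constructor in the
// corresponding .cpp file; FillAlphaLutSketch is a hypothetical name). An
// alpha function with peak time TauPeak and peak amplitude Amp,
//   f(t) = Amp * (t / TauPeak) * exp(1 - t / TauPeak),
// rises from zero, peaks at t = TauPeak with value Amp, and decays back
// toward zero.
#include <cmath>

inline void FillAlphaLutSketch(float *Lut, int N, float TauPeak, float Amp)
{
    for (int t = 0; t < N; ++t)
        Lut[t] = Amp * (t / TauPeak) * std::exp(1.0f - t / TauPeak);
}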
// Learning rule inspired by Sjöström, Turrigiano, Nelson 2001:
// - only nearest-neighbor interactions, as in LearnFroemkeDan and LearnBiPoo
// - frequency dependence (??)
// - LTP "wins" over LTD
// - LTP depends on the initial synaptic strength (the stronger the synapse,
//   the smaller the relative change --> additive weight change as in
//   LearnRossum1?? see van Rossum, Bi, Turrigiano 2000)
class LearnSjoestroem : public learning {
protected:
    int **LastSpikesPre;        // time of the last spike at each presynaptic site
    int **LastSpikesPost;       // time of the last spike of each postsynaptic neuron
    float *LTPotLearnWindowLut; // lookup table for the potentiation learning window
    float *LTDepLearnWindowLut; // lookup table for the depression learning window
    int LTPotLutN, LTDepLutN;
    float LearnRate, TauLTDep, TauLTPot, Amp1, Amp2;
public:
    LearnSjoestroem(connection* con, float _maxWeight=1, float LearnRate=0.001, float TauLTDep=35, float TauLTPot=15, float AmpLTDep=-.5, float AmpLTPot=0.2);
    ~LearnSjoestroem();
    virtual int proceede(int TotalTime);
    virtual int prepare(int step);
};

#endif /* _HPP_LEARN */
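// Usage sketch (hedged: names like MyCon and NSteps are hypothetical, and the
// layer/connection setup is assumed to follow the conventions implied by the
// constructors above). A learning rule wraps an existing connection and is
// advanced once per simulation time step:
//
//   connection *MyCon = /* create a connection between two layers */;
//   learning *Rule = new LearnHebbLP2(MyCon, /*_MaxWeight=*/1.0f,
//                                     /*TauDec=*/20.0f, /*BaseLine=*/0.1f,
//                                     /*_LearnSpeed=*/0.001f);
//   Rule->prepare();
//   for (int t = 0; t < NSteps; ++t)
//       Rule->proceede(t); // apply this rule's weight update for the step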