/*Copyright (C) 2005, 2006, 2007, 2008 Frank Michler, Philipps-University Marburg, Germany

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
*/
#include "sys.hpp"   // for libcwd
#include "debug.hpp" // for libcwd
#include "vlearn.hpp"
- veclearning::veclearning(VecConnection* c, float _maxWeight): SimElement(seLearning), con(c), maxWeight(_maxWeight), minWeight(0)
- {
- Name="VecLearning";
- cout << "Initialize VecLearning Object, maxWeight= " << maxWeight << "\n";
- int i,j,k;
- VecConnectionInfo ConInf = con->GetConnectionInfo();
- Dmax = ConInf.Dmax;
- maximumDelay = ConInf.maximumDelay;
-
- cout << "Dmax=" << Dmax << "\n";
- TargetLayer = ConInf.TargetLayer;
- SourceLayer = ConInf.SourceLayer;
- nt = TargetLayer->N;
- ns = SourceLayer->N;
- dt = TargetLayer->GetDt();
- MacroTimeStep = TargetLayer->GetMacroTimeStep();
- cout << "Initialize VecLearning Object: N_Target =" << nt << "\n";
-
- PSynWeights=ConInf.PSynWeights;
- PSynTargetNr=ConInf.PSynTargetNr;
- PSynSourceNr=ConInf.PSynSourceNr;
- PSynDelays=ConInf.PSynDelays;
- PPreSynNr=ConInf.PPreSynNr;
- Pdelays=ConInf.Pdelays;
- }
- veclearning::~veclearning()
- {
- cout << "veclearning Destructor\n"; fflush(stdout);
- }
- ///////////////////////////////////////
- VecLearnHebbLP2::VecLearnHebbLP2(VecConnection* con, float _MaxWeight, float TauDec, float _BaseLine, float _LearnSpeed, bool _Accumulate): veclearning(con, _MaxWeight), BaseLine(_BaseLine), LearnSpeed(_LearnSpeed), LtpInc(1), Accumulate(_Accumulate)
- {
- LtpDecFac = exp(-dt/TauDec);
- int i,j,k;
- NewArray2d(LTP,ns,1001+Dmax); // presynaptic trace (source)
- for (i=0;i<ns;i++) for (j=0;j<1001+Dmax;j++) LTP[i][j]=0.0;
- cout << "VecLearnHebbLP2\n";
- cout << "LearnSpeed = " << LearnSpeed << " TauDec=" << TauDec << "\n";
- cout << "Accumulate=" << Accumulate << "\n";
- cout << "initialize LTP\n";
- fflush(stdout);
- }
- VecLearnHebbLP2::~VecLearnHebbLP2()
- {
- DeleteArray2d(LTP,ns);
- }
- int VecLearnHebbLP2::WriteSimInfo(fstream &fw)
- {
- fw << "<" << seTypeString << " id=\"" << IdNumber << "\" Type=\"" << seType << "\" Name=\"" << Name << "\"> \n";
- fw << "</VecLearnHebbLP2> \n";
- fw << "<LearnConnection id=\"" << con->IdNumber << "\"/> \n";
- fw << "<MaxWeight Value=\"" << maxWeight << "\"/> \n";
- fw << "<LtpDecFac Value=\"" << LtpDecFac << "\"/> \n";
- fw << "<BaseLine Value=\"" << BaseLine << "\"/> \n";
- fw << "<LearnSpeed Value=\"" << LearnSpeed << "\"/> \n";
- fw << "<Accumulate Value=\"" << Accumulate << "\"/> \n";
- fw << "<LtpInc Value=\"" << LtpInc << "\"/> \n";
- fw << "</" << seTypeString << "> \n";
- }
- int VecLearnHebbLP2::proceede(int TotalTime)
- {
- // cout <<"L";fflush(stdout); //remove
- int t = int(TotalTime % MacroTimeStep);
- int i,j,k;
- // increase learning potentials for each spike of last time step
- int spike = SourceLayer->last_N_firings;
- if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings <SourceLayer->N_firings ))
- {
- cerr << "this should never happen, I just don't know why ;-)\n";
- cerr << "programming error, wrong firing indices \n";
- cerr << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
- cerr << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
- cerr << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
- }
-
- // update presynaptic learning potential (for ltp)
- if (Accumulate) while (spike < SourceLayer->N_firings) LTP[SourceLayer->firings[spike++][1]][t+Dmax] += LtpInc;
- else while (spike < SourceLayer->N_firings) LTP[SourceLayer->firings[spike++][1]][t+Dmax] = LtpInc;
- // END update
-
- spike = TargetLayer->last_N_firings;
- while (spike < TargetLayer->N_firings)
- {
- i= TargetLayer->firings[spike++][1];
- // calculate LTP
- for (j=0;j<(*PPreSynNr)[i].size();j++) {
- T_NSynapses SynNr=(*PPreSynNr)[i][j];
- (*PSynWeights)[SynNr] += LearnSpeed*(LTP[(*PSynSourceNr)[SynNr]][t+Dmax-(*PSynDelays)[SynNr]-1] - BaseLine);
- if ((*PSynWeights)[SynNr] > maxWeight) {
- (*PSynWeights)[SynNr]=maxWeight;
- } else {
- if ((*PSynWeights)[SynNr] < minWeight) {
- (*PSynWeights)[SynNr]=minWeight;
- }
- }
- }
- }
- for (i=0;i<ns;++i) LTP[i][t+Dmax+1]= LTP[i][t+Dmax] * LtpDecFac;
- }
- int VecLearnHebbLP2::prepare(int step)
- {
- int i,j,k;
- for (i=0;i<ns;i++) // prepare for the next sec
- for (j=0;j<Dmax+1;j++)
- LTP[i][j]=LTP[i][MacroTimeStep+j];
- }
- //////////////////////////////////////
- ///////////////////////////////////
- VecLearnPrePost::VecLearnPrePost(VecConnection* con, float _maxWeight, float _TauLearnPre, float _TauLearnPost): veclearning(con, _maxWeight)
- {
- int i,j,k;
- NewArray2d(LPpre,ns,MacroTimeStep+1+Dmax); // presynaptic trace (source)
- LPpost = new float[nt]; // postsynaptic trace (target)
- dec_pre = exp(-dt/_TauLearnPre);
- dec_pre = exp(-dt/_TauLearnPost);
- cout << "initialize LPpre\n";
- fflush(stdout);
- for (i=0;i<ns;i++) for (j=0;j<MacroTimeStep+1+Dmax;j++) LPpre[i][j]=0.0;
- cout << "initialize LPpost\n"; fflush(stdout);
- for (i=0;i<nt;i++) LPpost[i]=0.0;
- cout << "initialized LPpost\n"; fflush(stdout);
- }
- VecLearnPrePost::~VecLearnPrePost()
- {
- cout << "VecLearnPrePost Destructor\n";fflush(stdout);
- DeleteArray2d(LPpre, ns);
- delete [] LPpost;
- }
- int VecLearnPrePost::prepare(int step)
- {
- int i,j,k;
- for (i=0;i<ns;i++) // prepare for the next sec
- for (j=0;j<Dmax+1;j++)
- LPpre[i][j]=LPpre[i][MacroTimeStep+j];
- }
- ///////////////////////
- VecLearnHebbLP3::VecLearnHebbLP3(VecConnection* con, float _maxWeight, float _TauLearnPre, float _TauLearnPost, float _BaseLine, float _LearnRate, bool _Accumulate): VecLearnPrePost(con, _maxWeight, _TauLearnPre, _TauLearnPost), LearnRate(_LearnRate), BaseLine(_LearnRate*_BaseLine), Accumulate(_Accumulate)
- {
- cout << "Initialize VecLearnHebbLP3\n";
- BaseLine = BaseLine*LearnRate;
- // see comment in LearnHebbLP3::proceede(int TotalTime)
- }
- int VecLearnHebbLP3::proceede(int TotalTime)
- {
- // learning rule: dw/dt = LearnRate*(LPpre-Lbaseline)*LPpost
- // in this implementation learn rate is used as LPpreInc
- // this is equivalent to the above formula if Lbaseline is adjusted correspondingly
- // Lbaseline_corr = Lbaseline*LearnRate
- // the advantage is, that there is one multiplication (with *LearnRate) less in the loop!
- // if const sum normalization is done Lbaseline shouldn't have any effect anyway.
-
- int t = int(TotalTime % MacroTimeStep);
- int i,j,k;
- // increase learning potentials for each spike of last time step
- int spike = SourceLayer->last_N_firings;
- if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings <SourceLayer->N_firings ))
- {
- cout << "programming error, wrong firing indices \n";
- cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
- cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
- cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
- }
-
- // update presynaptic learning potential (for ltp)
- while (spike < SourceLayer->N_firings) LPpre[SourceLayer->firings[spike++][1]][t+Dmax] += LearnRate;
- // update
- spike = TargetLayer->last_N_firings;
- // cout << "ltp ";fflush(stdout);
- while (spike < TargetLayer->N_firings)
- {
- i= TargetLayer->firings[spike++][1];
- LPpost[i]+=1;
- // calculate LTP
- for (j=0;j<(*PPreSynNr)[i].size();j++) {
- T_NSynapses SynNr=(*PPreSynNr)[i][j];
- (*PSynWeights)[SynNr] += LPpost[i]*(LPpre[(*PSynSourceNr)[SynNr]][t+Dmax-(*PSynDelays)[SynNr]-1]-BaseLine);
- if ((*PSynWeights)[SynNr] > maxWeight) (*PSynWeights)[SynNr] = maxWeight;
- if ((*PSynWeights)[SynNr] < minWeight) (*PSynWeights)[SynNr] = minWeight;
- }
- // this spike was after pre-synaptic spikes
-
- // update postsynaptic potential
- }
-
- // decreas synaptic potentials
- for (i=0;i<ns;++i) LPpre[i][t+Dmax+1]= LPpre[i][t+Dmax] * dec_pre;
- for (i=0;i<nt;++i) LPpost[i]*=dec_post;
- }
/////////////////////////////////////////////////////////
|