/* Copyright (C) 2005, 2006, 2007 Frank Michler, Philipps-University Marburg, Germany

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA. */

#include "sys.hpp"   // for libcwd
#include "debug.hpp" // for libcwd
#include "learn.hpp"
// The original include targets were lost in this copy; the headers
// below are reconstructions covering what the code actually uses.
#include <iostream>           // cout
#include <cstdio>             // fflush, stdout
#include <cmath>              // exp
#include <fstream>            // fstream (XML parameter writer)
#include <gsl/gsl_randist.h>  // gsl_ran_gaussian

////////////////////////////////
////////////////////////////////

learning::learning(connection* c, float _maxWeight):
  SimElement(seLearning), con(c), maxWeight(_maxWeight), minWeight(0)
{
  Name = "Learning";
  cout << "Initialize Learning Object, maxWeight= " << maxWeight << "\n";
  int i, j, k;
  ConnectionInfo ConInf = con->GetConnectionInfo();
  Dmax = ConInf.Dmax;
  maximumDelay = ConInf.maximumDelay;
  cout << "Dmax=" << Dmax << "\n";
  TargetLayer = ConInf.TargetLayer;
  SourceLayer = ConInf.SourceLayer;
  nt = TargetLayer->N;
  ns = SourceLayer->N;
  dt = TargetLayer->GetDt();
  MacroTimeStep = TargetLayer->GetMacroTimeStep();
  cout << "Initialize Learning Object: N_Target =" << nt << "\n";
  post = ConInf.post;
  delays = ConInf.delays;
  delays_length = ConInf.delays_length;
  M = ConInf.M;
  // NewArray2d(sd, ns, M);
  // for (i=0; i<ns; i++) for (j=0; j<M; j++) sd[i][j] = 0;
}
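/* All proceede() implementations below walk the layers' spike lists the
   same way: firings[n][0] holds a spike's time within the current macro
   time step, firings[n][1] the index of the neuron that fired, and
   last_N_firings marks where the spikes of the current step begin.
   A minimal sketch of that traversal (illustrative only; 'layer' stands
   for a SourceLayer/TargetLayer pointer):

     int spike = layer->last_N_firings;        // first spike of this step
     while (spike < layer->N_firings) {        // all spikes of this step
       int when = layer->firings[spike][0];    // time within macro step
       int who  = layer->firings[spike++][1];  // neuron that fired
       // ... update traces/weights for neuron 'who' at time 'when' ...
     }
*/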
// NOTE: the definitions between learning::learning and this point
// (e.g. a destructor and the levylearning constructor) are missing
// from this copy; the function head below is reconstructed to match
// the body and the levylearning::prepare definition further down.
int levylearning::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;

  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t)
      && (SourceLayer->last_N_firings < SourceLayer->N_firings)) {
    cout << "programming error, wrong firing indices \n";
    cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }

  // update presynaptic learning potentials (for LTP): two traces per
  // source neuron, overwritten rather than accumulated
  while (spike < SourceLayer->N_firings) {
    LTP1[SourceLayer->firings[spike][1]][t+Dmax] = LtpInc;   // no accumulation!
    LTP2[SourceLayer->firings[spike++][1]][t+Dmax] = LtpInc;
  }

  // update weights for each postsynaptic spike
  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings) {
    i = TargetLayer->firings[spike++][1];
    // LTD[i]=0.12;
    // calculate LTP; the exact increment was lost in this copy -- the
    // difference of the two presynaptic traces is an assumption
    for (j=0; j<M; j++) {
      *s_pre[i][j] += LTP1[I_pre[i][j]][t+Dmax-D_pre[i][j]-1]
                    - LTP2[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
      if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
    }
    // this spike was after pre-synaptic spikes
  }

  // decrease potentials; the decay code was lost in this copy -- an
  // exponential decay of both traces is an assumption (LtpDecFac1 and
  // LtpDecFac2 are assumed member names)
  for (i=0; i<ns; i++) {
    LTP1[i][t+Dmax+1] = LtpDecFac1*LTP1[i][t+Dmax];
    LTP2[i][t+Dmax+1] = LtpDecFac2*LTP2[i][t+Dmax];
  }

  Learn2Record->record(dt*TotalTime, (LTP1[0][t]-LTP2[0][t]),
                       (LTP1[ns/2][t]-LTP2[ns/2][t]));
  return 0;
}

int levylearning::prepare(int step)
{
  int i, j, k;
  // cout << "levylearning::prepare()\n";
  for (i=0; i<ns; i++)
    for (j=0; j<M; j++) {
      // // if (sd[i][j] > 0.0)
      // //   cout << sd[i][j] << " ";
      // // s[i][j] += sd[i][j];
      // // if (s[i][j] > maxWeight) s[i][j] = maxWeight;
      // // if (s[i][j] < 0) s[i][j] = 0.0;
      // // sd[i][j] = 0; // reset weight derivatives
    }
  return 0;
}
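/* The per-step decay factors computed in the constructors below follow
   from the exponential trace dynamics x(t) = x(0)*exp(-t/tau):
   advancing one step of length dt multiplies a trace by exp(-dt/tau).
   For example, assuming dt = 1 (one millisecond per step) and
   TAULEARN_pre = 50, dec_pre = exp(-1/50) ~ 0.9802, so a trace falls to
   1/e of its value after about 50 steps. */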
cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n"; } // update presynaptic learning potential (for ltp) while (spike < SourceLayer->N_firings) SourceLearnPot[SourceLayer->firings[spike++][1]][t+Dmax] += 1.0; // update spike = TargetLayer->last_N_firings; // cout << "ltp ";fflush(stdout); while (spike < TargetLayer->N_firings) { i= TargetLayer->firings[spike++][1]; TargetLearnPot[i]=1; // calculate LTP for (j=0;j 0) cout << " BIGG"; // cout << Cp* SourceLearnPot[I_pre[i][j]][t+Dmax-D_pre[i][j]-1] << " "; } // this spike was after pre-synaptic spikes } for (i=0;iN_firings; while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!! { int SourceI=SourceLayer->firings[k][1]; // cout << "startwhile k=" << k; fflush(stdout); // cout << "maximumDelay=" << maximumDelay << " SourceI=" << SourceI; // cout << " delays_length[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]]=" << delays_length[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]] << "\n"; fflush(stdout); for (j=0; j< delays_length[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]]; j++) { // cout << "before j=" << j << "i=" << i << "CW=" << *CurWeight << "k=" << k << "\n";fflush(stdout); CurWeight = &s[SourceLayer->firings[k][1]] [delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]]; // // cout << "hereithappens"; fflush(stdout); i=post[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]]; // cout << "TargetLearnPot[i]=" << TargetLearnPot[i]; fflush(stdout); // if ((i<0) || (i>=nt)) { // cout << "i out of range\n"; // fflush(stdout); // } else // { // cout << "TargetLearnPot[i]=" << TargetLearnPot[i]; // fflush(stdout); // } // cout << "j=" << j << "i=" << i << "CW=" << *CurWeight << "\n";fflush(stdout); // cout << "TargetLearnPot[i]=" << TargetLearnPot[i]; fflush(stdout); // *CurWeight -= *CurWeight*Cd*TargetLearnPot[i]; // float DeltaW = *CurWeight*(-Cd +gsl_ran_gaussian(gslr, SynNoiseSigma))* TargetLearnPot[i]; // cout << DeltaW << " " << TargetLearnPot[i] << "|"; fflush(stdout); *CurWeight +=*CurWeight*(-Cd +gsl_ran_gaussian(gslr, SynNoiseSigma))* TargetLearnPot[i]; if (*CurWeight < 0) *CurWeight=0; // s[SourceLayer->firings[k][1]] [delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]] -= TargetLearnPot[i]; // learning (ToDo: turn learning off/on) // cout << "after j=" << j << "i=" << i << "CW=" << *CurWeight << "\n";fflush(stdout); } } // cout << "fin "; fflush(stdout); } int LearnRossum1::prepare(int step) { int i,j,k; for (i=0;ilast_N_firings; if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings N_firings )) { cout << "programming error, wrong firing indices \n"; cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n"; cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n"; cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n"; } // update presynaptic learning potential (for ltp) while (spike < SourceLayer->N_firings) SourceLearnPot[SourceLayer->firings[spike++][1]][t+Dmax] = 1.0; // update spike = TargetLayer->last_N_firings; // cout << "ltp ";fflush(stdout); while (spike < TargetLayer->N_firings) { i= TargetLayer->firings[spike++][1]; TargetLearnPot[i]=1; // calculate LTP for (j=0;jN_firings; while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!! 
// The head of the following proceede() variant was lost in this copy;
// judging from the body (traces set rather than accumulated, additive
// potentiation on arriving presynaptic spikes) it implements a second
// Rossum-type rule -- the name LearnRossum2 is an assumption.
int LearnRossum2::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;

  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t)
      && (SourceLayer->last_N_firings < SourceLayer->N_firings)) {
    cout << "programming error, wrong firing indices \n";
    cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }

  // update presynaptic learning potential (for LTP)
  while (spike < SourceLayer->N_firings)
    SourceLearnPot[SourceLayer->firings[spike++][1]][t+Dmax] = 1.0;

  spike = TargetLayer->last_N_firings;
  // cout << "ltp "; fflush(stdout);
  while (spike < TargetLayer->N_firings) {
    i = TargetLayer->firings[spike++][1];
    TargetLearnPot[i] = 1;
    // calculate LTP
    for (j=0; j<M; j++) {
      // (the weight update of this loop was lost in this copy)
    }
  }

  // decay of the learning potentials (reconstructed; lost in this copy)
  for (i=0; i<nt; i++) TargetLearnPot[i] *= dec_post;
  for (i=0; i<ns; i++) SourceLearnPot[i][t+Dmax+1] = dec_pre*SourceLearnPot[i][t+Dmax];

  // noisy, additive potentiation for all presynaptic spikes arriving
  // at their postsynaptic targets in this time step
  float* CurWeight;
  k = SourceLayer->N_firings;
  while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!!
  {
    int SourceI = SourceLayer->firings[k][1];
    for (j=0; j < delays_length[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]]; j++)
    {
      CurWeight = &s[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
      i = post[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
      *CurWeight += (Cp + *CurWeight*gsl_ran_gaussian(gslr, SynNoiseSigma))*TargetLearnPot[i];
      if (*CurWeight < 0) *CurWeight = 0;
    }
  }
  return 0;
}

///////////////////////////////////////

LearnHebbLP2::LearnHebbLP2(connection* con, float _MaxWeight, float TauDec, float _BaseLine, float _LearnSpeed, bool _Accumulate):
  learning(con, _MaxWeight), BaseLine(_BaseLine), LearnSpeed(_LearnSpeed), LtpInc(1), Accumulate(_Accumulate)
{
  LtpDecFac = exp(-dt/TauDec);
  int i, j, k;
  NewArray2d(LTP, ns, 1001+Dmax); // presynaptic trace (source)
  cout << "LearnHebbLP2\n";
  cout << "LearnSpeed = " << LearnSpeed << " TauDec=" << TauDec << "\n";
  cout << "Accumulate=" << Accumulate << "\n";
  cout << "initialize LTP\n"; fflush(stdout);
  // trace initialization (reconstructed; the loop body was lost in this copy)
  for (i=0; i<ns; i++)
    for (j=0; j<1001+Dmax; j++) LTP[i][j] = 0;
}

// XML parameter writer: the quoted tag strings were stripped from this
// copy (everything between angle brackets was lost); only the writer's
// skeleton and the IdNumber attribute survive. The method head and the
// surviving first tag are assumed reconstructions.
int LearnHebbLP2::WriteSimInfo(fstream& fw)
{
  fw << "<Learning IdNumber=\"" << IdNumber << "\"/> \n";
  fw << " \n"; // tag content lost
  fw << " \n"; // tag content lost
  fw << " \n"; // tag content lost
  fw << " \n"; // tag content lost
  fw << " \n"; // tag content lost
  fw << " \n"; // tag content lost
  fw << " \n"; // tag content lost
  return 0;
}

int LearnHebbLP2::proceede(int TotalTime)
{
  // cout <<"L"; fflush(stdout); // remove
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;

  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t)
      && (SourceLayer->last_N_firings < SourceLayer->N_firings)) {
    cout << "programming error, wrong firing indices \n";
    cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }

  // update presynaptic learning potential (for LTP)
  if (Accumulate)
    while (spike < SourceLayer->N_firings)
      LTP[SourceLayer->firings[spike++][1]][t+Dmax] += LtpInc;
  else
    while (spike < SourceLayer->N_firings)
      LTP[SourceLayer->firings[spike++][1]][t+Dmax] = LtpInc;

  // update weights for each postsynaptic spike
  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings) {
    i = TargetLayer->firings[spike++][1];
    // calculate LTP; the increment was lost in this copy -- the
    // baseline-shifted trace term is an assumption consistent with the
    // class parameters
    for (j=0; j<M; j++) {
      *s_pre[i][j] += LearnSpeed*(LTP[I_pre[i][j]][t+Dmax-D_pre[i][j]-1] - BaseLine);
      if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
      else if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
      // if (*sd_pre[i][j] > 0) cout << *sd_pre[i][j] << " ";
    }
  }

  // decay of the presynaptic traces (reconstructed; lost in this copy)
  for (i=0; i<ns; i++)
    LTP[i][t+Dmax+1] = LtpDecFac*LTP[i][t+Dmax];
  return 0;
}

int LearnHebbLP2::prepare(int step)
{
  int i, j, k;
  // carry the trace tail over into the next macro time step
  // (reconstructed; the loop body was lost in this copy)
  for (i=0; i<ns; i++)
    for (j=0; j<Dmax+1; j++)
      LTP[i][j] = LTP[i][MacroTimeStep+j];
  // for (i=0; i<ns; i++)
  //   for (j=0; j<M; j++) {
  //     if (s[i][j]>maxWeight) s[i][j]=maxWeight;
  //     if (s[i][j]<0) s[i][j]=0.0;
  //   }
  return 0;
}

//////////////////////////////////////

LearnHebbLP2_norm::LearnHebbLP2_norm(connection* con, float _MaxWeight, float TauDec, float _BaseLine, float _LearnSpeed, float _WeightSum):
  learning(con, _MaxWeight), BaseLine(_BaseLine), LearnSpeed(_LearnSpeed), LtpInc(1), WeightSum(_WeightSum)
{
  LtpDecFac = exp(-dt/TauDec);
  int i, j, k;
  NewArray2d(LTP, ns, 1001+Dmax); // presynaptic trace (source)
  cout << "LearnHebbLP2_norm\n";
  cout << "LearnSpeed = " << LearnSpeed << " WeightSum=" << WeightSum << "\n";
  cout << "initialize LTP\n"; fflush(stdout);
  // trace initialization (reconstructed; the loop body was lost in this copy)
  for (i=0; i<ns; i++)
    for (j=0; j<1001+Dmax; j++) LTP[i][j] = 0;
}
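/* LearnHebbLP2(_norm) update, per postsynaptic spike of neuron i and
   incoming synapse j (the increments themselves are reconstructed
   above, so read this as a sketch):

     dw_ij = LearnSpeed * (LTP_j - BaseLine)   // LearnHebbLP2

   where LTP_j is the presynaptic trace sampled at the delay-corrected
   arrival time: synapses whose trace exceeds BaseLine grow, the others
   shrink. LearnHebbLP2_norm adds the raw trace term instead and then
   rescales all of the neuron's incoming weights multiplicatively,

     w_ij <- w_ij * WeightSum / sum_k(w_ik),

   so the total incoming weight of each target neuron stays at
   WeightSum. */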
int LearnHebbLP2_norm::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;

  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t)
      && (SourceLayer->last_N_firings < SourceLayer->N_firings)) {
    cout << "programming error, wrong firing indices \n";
    cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }

  // update presynaptic learning potential (for LTP)
  while (spike < SourceLayer->N_firings)
    LTP[SourceLayer->firings[spike++][1]][t+Dmax] += LtpInc;

  // update weights for each postsynaptic spike, then normalize
  spike = TargetLayer->last_N_firings;
  float CurWeightSum;
  while (spike < TargetLayer->N_firings) {
    i = TargetLayer->firings[spike++][1];
    // calculate LTP; the increment was lost in this copy -- the trace
    // term is an assumption consistent with LearnHebbLP2::proceede
    CurWeightSum = 0;
    for (j=0; j<M; j++) {
      *s_pre[i][j] += LearnSpeed*LTP[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
      CurWeightSum += *s_pre[i][j];
      // if (*sd_pre[i][j] > 0) cout << *sd_pre[i][j] << " ";
    }
    // normalize
    if (CurWeightSum > 0)
      for (j=0; j<M; j++)
        *s_pre[i][j] *= WeightSum/CurWeightSum;
  }

  // decay of the presynaptic traces (reconstructed; lost in this copy)
  for (i=0; i<ns; i++)
    LTP[i][t+Dmax+1] = LtpDecFac*LTP[i][t+Dmax];
  return 0;
}

int LearnHebbLP2_norm::prepare(int step)
{
  int i, j, k;
  // carry the trace tail over into the next macro time step
  // (reconstructed; the loop body was lost in this copy)
  for (i=0; i<ns; i++)
    for (j=0; j<Dmax+1; j++)
      LTP[i][j] = LTP[i][MacroTimeStep+j];
  // for (i=0; i<ns; i++)
  //   for (j=0; j<M; j++) {
  //     if (s[i][j]>maxWeight) s[i][j]=maxWeight;
  //     if (s[i][j]<0) s[i][j]=0.0;
  //   }
  return 0;
}

int LearnHebbLP2_norm::NormalizeWeights()
{
  int i, j;
  cout << "Normalize Weights";
  float CurWeightSum;
  // rescale each target neuron's incoming weights to WeightSum
  // (partially reconstructed; parts of this loop were lost in this copy)
  for (i=0; i<nt; i++) {
    CurWeightSum = 0;
    for (j=0; j<M; j++) CurWeightSum += *s_pre[i][j];
    if (CurWeightSum > 0)
      for (j=0; j<M; j++)
        *s_pre[i][j] *= WeightSum/CurWeightSum;
  }
  return 0;
}

// NOTE: the definitions between NormalizeWeights and this point (the
// LearnPrePost constructor and the head of its proceede method) were
// lost in this copy; the head below is an assumed reconstruction.
int LearnPrePost::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;

  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t)
      && (SourceLayer->last_N_firings < SourceLayer->N_firings)) {
    cout << "programming error, wrong firing indices \n";
    cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }

  // update presynaptic learning potential (for LTP)
  while (spike < SourceLayer->N_firings)
    LPpre[SourceLayer->firings[spike++][1]][t+Dmax] += LearnRate;

  spike = TargetLayer->last_N_firings;
  // cout << "ltp "; fflush(stdout);
  while (spike < TargetLayer->N_firings) {
    i = TargetLayer->firings[spike++][1];
    LPpost[i] += 1;
    // calculate LTP; the increment was lost in this copy -- reading the
    // presynaptic trace at the delay-corrected time is an assumption
    for (j=0; j<M; j++) {
      *s_pre[i][j] += LPpre[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
      if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
      if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
    }
    // this spike was after pre-synaptic spikes
    // update postsynaptic potential
  }

  // decrease synaptic potentials (reconstructed; lost in this copy)
  for (i=0; i<ns; i++)
    LPpre[i][t+Dmax+1] = dec_pre*LPpre[i][t+Dmax];
  for (i=0; i<nt; i++)
    LPpost[i] *= dec_post;
  return 0;
}
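/* The LearnPrePost family keeps one exponentially decaying presynaptic
   trace per source neuron (LPpre, indexed with the same t+Dmax offset
   as LTP above) and one postsynaptic trace per target neuron (LPpost).
   The subclasses below (LearnFBInh, LearnFWInh, LearnAntiHebb) differ
   only in how spike pairings read these traces and in the sign and
   weight dependence of the resulting update. */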
// LTP: according to pre->post pairing (exp learning potential)
// LTD: for every presynaptic spike, proportional to current weight
int LearnFBInh::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;

  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t)
      && (SourceLayer->last_N_firings < SourceLayer->N_firings)) {
    cout << "programming error, wrong firing indices \n";
    cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }

  // update presynaptic learning potential (for LTP)
  while (spike < SourceLayer->N_firings)
    LPpre[SourceLayer->firings[spike++][1]][t+Dmax] = Cpre;

  spike = TargetLayer->last_N_firings;
  // cout << "ltp "; fflush(stdout);
  while (spike < TargetLayer->N_firings) {
    i = TargetLayer->firings[spike++][1];
    LPpost[i] = 1;
    // calculate LTP; the increment was lost in this copy -- reading the
    // presynaptic trace at the delay-corrected time is an assumption
    for (j=0; j<M; j++) {
      *s_pre[i][j] += LPpre[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
      if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
    }
    // this spike was after pre-synaptic spikes
  }

  // decay of the learning potentials (reconstructed; lost in this copy)
  for (i=0; i<ns; i++) LPpre[i][t+Dmax+1] = dec_pre*LPpre[i][t+Dmax];
  for (i=0; i<nt; i++) LPpost[i] *= dec_post;

  // weight-proportional depression for all presynaptic spikes arriving
  // at their postsynaptic targets in this time step
  float* CurWeight;
  k = SourceLayer->N_firings;
  while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!!
  {
    int SourceI = SourceLayer->firings[k][1];
    for (j=0; j < delays_length[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]]; j++)
    {
      CurWeight = &s[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
      i = post[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
      // *CurWeight -= *CurWeight*Cd*LPpost[i];
      // float DeltaW = *CurWeight*(-Cd +gsl_ran_gaussian(gslr, SynNoiseSigma))* LPpost[i];
      *CurWeight -= *CurWeight*Cdep; // *(1-LPpost[i]);
      if (*CurWeight < 0) *CurWeight = 0;
      // s[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]] -= LPpost[i]; // learning (ToDo: turn learning off/on)
    }
  }
  return 0;
}

//////////////////////////////////////////////

LearnFWInh::LearnFWInh(connection* con, float _maxWeight, float TauLearnPre, float TauLearnPost, float _Cpre, float _Cdep):
  LearnPrePost(con, _maxWeight, TauLearnPre, TauLearnPost), Cpre(_Cpre), Cdep(_Cdep)
{
  cout << "LearnFWInh: MaxWeight=" << maxWeight << "\n";
}

// LTD: according to pre->post pairing (exp learning potential), proportional to current weight
// LTP: for every presynaptic spike, proportional to current weight
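/* Schematically, LearnFWInh applies (a sketch, not verbatim code):

     pre->post pairing:    w -= Cdep * w * LPpre   (weight-proportional LTD)
     presynaptic arrival:  w += Cpre * w           (weight-proportional LTP)

   Because both terms scale with the current weight, weights cannot
   change sign; negative values are additionally clipped to zero. */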
int LearnFWInh::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;

  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t)
      && (SourceLayer->last_N_firings < SourceLayer->N_firings)) {
    cout << "programming error, wrong firing indices \n";
    cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }

  // update presynaptic learning potential
  while (spike < SourceLayer->N_firings)
    LPpre[SourceLayer->firings[spike++][1]][t+Dmax] = Cpre;

  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings) {
    i = TargetLayer->firings[spike++][1];
    LPpost[i] = 1;
    // weight-proportional depression for pre->post pairings; the loop
    // body was lost in this copy and is a reconstruction
    for (j=0; j<M; j++) {
      *s_pre[i][j] -= *s_pre[i][j]*Cdep*LPpre[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
      if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
    }
  }

  // decay of the learning potentials (reconstructed; lost in this copy)
  for (i=0; i<ns; i++) LPpre[i][t+Dmax+1] = dec_pre*LPpre[i][t+Dmax];
  for (i=0; i<nt; i++) LPpost[i] *= dec_post;

  // weight-proportional potentiation for every arriving presynaptic spike
  float* CurWeight;
  k = SourceLayer->N_firings;
  while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!!
  {
    int SourceI = SourceLayer->firings[k][1];
    for (j=0; j < delays_length[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]]; j++)
    {
      CurWeight = &s[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
      i = post[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
      *CurWeight += *CurWeight*Cpre;
      if (*CurWeight < 0) *CurWeight = 0;
    }
  }
  return 0;
}

/////////////////////////////////////////////////////////

LearnAntiHebb::LearnAntiHebb(connection* con, float _maxWeight, float TauLearnPre, float TauLearnPost, float _Cpre, float _Cdep):
  LearnPrePost(con, _maxWeight, TauLearnPre, TauLearnPost), Cpre(_Cpre), Cdep(_Cdep)
{
  cout << "LearnAntiHebb: MaxWeight=" << maxWeight << "\n";
}

// Dummy: this object is not in use yet (use LearnHebbLP2 instead)
int LearnAntiHebb::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;

  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t)
      && (SourceLayer->last_N_firings < SourceLayer->N_firings)) {
    cout << "programming error, wrong firing indices \n";
    cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }

  // update presynaptic learning potential (for LTP)
  while (spike < SourceLayer->N_firings)
    LPpre[SourceLayer->firings[spike++][1]][t+Dmax] = 1;

  spike = TargetLayer->last_N_firings;
  // cout << "ltp "; fflush(stdout);
  while (spike < TargetLayer->N_firings) {
    i = TargetLayer->firings[spike++][1];
    LPpost[i] = 1;
    // calculate LTP; the loop body was lost in this copy -- the trace
    // term is an assumption
    for (j=0; j<M; j++) {
      *s_pre[i][j] += LPpre[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
      if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
    }
    // this spike was after pre-synaptic spikes
  }

  // decay of the learning potentials (reconstructed; lost in this copy)
  for (i=0; i<ns; i++) LPpre[i][t+Dmax+1] = dec_pre*LPpre[i][t+Dmax];
  for (i=0; i<nt; i++) LPpost[i] *= dec_post;

  // depression for every arriving presynaptic spike
  float* CurWeight;
  k = SourceLayer->N_firings;
  while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!!
  {
    int SourceI = SourceLayer->firings[k][1];
    for (j=0; j < delays_length[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]]; j++)
    {
      CurWeight = &s[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
      i = post[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
      // *CurWeight -= *CurWeight*Cd*LPpost[i];
      // float DeltaW = *CurWeight*(-Cd +gsl_ran_gaussian(gslr, SynNoiseSigma))* LPpost[i];
      *CurWeight -= Cdep*(1-LPpost[i]);
      if (*CurWeight < 0) *CurWeight = 0;
      // s[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]] -= LPpost[i]; // learning (ToDo: turn learning off/on)
    }
  }
  return 0;
}

//////////////////////////LearnBiPoo

LearnBiPoo::LearnBiPoo(connection* con, float _maxWeight, float _LearnRate, float _TauPeak1, float _TauPeak2, float _Amp1, float _Amp2):
  learning(con, _maxWeight), LearnRate(_LearnRate), TauPeak1(_TauPeak1), TauPeak2(_TauPeak2), Amp1(_Amp1), Amp2(_Amp2),
  LastSpikesPre(0), LastSpikesPost(0), PreLearnWindowLut(0), PostLearnWindowLut(0)
{
  // Initialize lookup tables for the learning window
  PreLearnWindowLut  = AlphaFktLut(PreLutN,  TauPeak1, LearnRate*Amp1, 0, dt);
  PostLearnWindowLut = AlphaFktLut(PostLutN, TauPeak2, LearnRate*Amp2, 0, dt);

  // initialize arrays for last spike times; the loop bodies were lost
  // in this copy -- the initial value -10000 (far in the past, so no
  // spurious pairings occur) is an assumption
  NewArray2d(LastSpikesPre, ns, M);
  for (int i=0; i<ns; i++)
    for (int j=0; j<M; j++) LastSpikesPre[i][j] = -10000;
  LastSpikesPost = new int[nt];
  for (int i=0; i<nt; i++) LastSpikesPost[i] = -10000;
}
int LearnBiPoo::proceede(int TotalTime)
{
  // (head reconstructed; the original was lost in this copy)
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k, SourceI, TDiff;

  // update weights for each postsynaptic spike of this step
  int spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings) {
    i = TargetLayer->firings[spike++][1];
    LastSpikesPost[i] = TotalTime;
    // calculate LTP; parts of this loop were lost in this copy -- the
    // m_pre synapse index is an assumption
    for (j=0; j<M; j++) {
      TDiff = TotalTime - LastSpikesPre[I_pre[i][j]][m_pre[i][j]];
      if ((TDiff > 0) && (TDiff < PostLutN)) {
        *s_pre[i][j] += PostLearnWindowLut[TDiff];
        if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
        if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
      }
    }
  }

  // learn all presynaptic neurons whose spikes arrive in this time step
  float* CurWeight;
  int CurDelay;
  int PostI;
  int SynNumber;
  k = SourceLayer->N_firings;
  while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!!
  {
    SourceI = SourceLayer->firings[k][1];
    CurDelay = t-SourceLayer->firings[k][0];
    for (j=0; j < delays_length[SourceI][CurDelay]; j++)
    {
      SynNumber = delays[SourceI][CurDelay][j];
      LastSpikesPre[SourceI][SynNumber] = TotalTime;
      CurWeight = &s[SourceI][SynNumber];
      i = post[SourceI][SynNumber];
      TDiff = TotalTime - LastSpikesPost[i];
      if ((TDiff > 0) && (TDiff < PreLutN)) {
        *CurWeight += PreLearnWindowLut[TDiff];
        if (*CurWeight > maxWeight) *CurWeight = maxWeight;
        if (*CurWeight < minWeight) *CurWeight = minWeight;
      }
    }
  }
  return 0;
}

LearnBiPoo::~LearnBiPoo()
{
  DeleteArray2d(LastSpikesPre, ns);
  delete [] LastSpikesPost;
  delete [] PreLearnWindowLut;
  delete [] PostLearnWindowLut;
}

////////////////////LearnFroemkeDan

LearnFroemkeDan::LearnFroemkeDan(connection* con, float _maxWeight, float _LearnRate,
                                 float _TauPreEff, float _TauPostEff,
                                 float _TauPeak1, float _TauPeak2, float _Amp1, float _Amp2):
  learning(con, _maxWeight), LearnRate(_LearnRate), TauPreEff(_TauPreEff), TauPostEff(_TauPostEff),
  TauPeak1(_TauPeak1), TauPeak2(_TauPeak2), Amp1(_Amp1), Amp2(_Amp2),
  LastSpikesPre(0), LastSpikesPost(0), SecondLastSpikesPre(0), SecondLastSpikesPost(0),
  PreLearnWindowLut(0), PostLearnWindowLut(0), PreEfficacyLut(0), PostEfficacyLut(0)
{
  // Initialize lookup tables for the learning window
  // PreLearnWindowLut = AlphaFktLut(PreLutN, TauPeak1, LearnRate*Amp1, 0, dt);
  // PostLearnWindowLut = AlphaFktLut(PostLutN, TauPeak2, LearnRate*Amp2, 0, dt);
  PreLearnWindowLut  = ExpDecayLut(PreLutN,  TauPeak1, LearnRate*Amp1, 0, dt);
  PostLearnWindowLut = ExpDecayLut(PostLutN, TauPeak2, LearnRate*Amp2, 0, dt);

  // initialize synaptic efficacy lookup tables
  float PreEffLutBound  = 5*TauPreEff;
  float PostEffLutBound = 5*TauPostEff;
  PreEffLutN  = int(PreEffLutBound/dt);
  PostEffLutN = int(PostEffLutBound/dt);
  PreEfficacyLut  = new float[PreEffLutN];
  PostEfficacyLut = new float[PostEffLutN];
  float t;
  // the loop bodies were lost in this copy; the saturating recovery
  // 1-exp(-t/tau) below is a reconstruction
  for (int n=0; n<PreEffLutN; n++) {
    t = n*dt;
    PreEfficacyLut[n] = 1 - exp(-t/TauPreEff);
  }
  for (int n=0; n<PostEffLutN; n++) {
    t = n*dt;
    PostEfficacyLut[n] = 1 - exp(-t/TauPostEff);
  }

  // initialize arrays for last and second-to-last spike times
  // (reconstructed; lost in this copy)
  NewArray2d(LastSpikesPre, ns, M);
  NewArray2d(SecondLastSpikesPre, ns, M);
  for (int i=0; i<ns; i++)
    for (int j=0; j<M; j++) {
      LastSpikesPre[i][j] = -10000;
      SecondLastSpikesPre[i][j] = -10000;
    }
  LastSpikesPost = new int[nt];
  SecondLastSpikesPost = new int[nt];
  for (int i=0; i<nt; i++) {
    LastSpikesPost[i] = -10000;
    SecondLastSpikesPost[i] = -10000;
  }
}
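/* LearnFroemkeDan weights each pairing by spike "efficacies" in the
   spirit of Froemke & Dan (2002): a spike that closely follows a
   previous spike of the same neuron contributes less. With the
   reconstruction above,

     eff = 1 - exp(-ISI/tau),

   where ISI is the interval to the neuron's preceding spike, so the
   relative weight change per pairing becomes

     dw/w = EffPre * EffPost * W(dt),

   with W(dt) the exponentially decaying window from ExpDecayLut. */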
int LearnFroemkeDan::proceede(int TotalTime)
{
  // (head reconstructed; the original was lost in this copy)
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k, SourceI, TDiff, TDiffPre, TDiffPost;
  float EffPre, EffPost;

  // update weights for each postsynaptic spike of this step
  int spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings) {
    i = TargetLayer->firings[spike++][1];
    SecondLastSpikesPost[i] = LastSpikesPost[i];
    LastSpikesPost[i] = TotalTime;
    TDiffPost = TotalTime - SecondLastSpikesPost[i];
    if (TDiffPost < PostEffLutN) {
      EffPost = PostEfficacyLut[TDiffPost];
    } else {
      EffPost = 1;
    }
    // calculate LTP; parts of this loop were lost in this copy -- the
    // presynaptic efficacy computation and the m_pre index are assumptions
    for (j=0; j<M; j++) {
      TDiffPre = LastSpikesPre[I_pre[i][j]][m_pre[i][j]]
               - SecondLastSpikesPre[I_pre[i][j]][m_pre[i][j]];
      if (TDiffPre < PreEffLutN) EffPre = PreEfficacyLut[TDiffPre];
      else EffPre = 1;
      TDiff = TotalTime - LastSpikesPre[I_pre[i][j]][m_pre[i][j]];
      if ((TDiff > 0) && (TDiff < PostLutN)) {
        *s_pre[i][j] *= (1 + EffPre*EffPost*PostLearnWindowLut[TDiff]);
        if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
        if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
      }
    }
  }

  // learn all presynaptic neurons whose spikes arrive in this time step
  float* CurWeight;
  int CurDelay;
  int SynNumber;
  k = SourceLayer->N_firings;
  while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!!
  {
    SourceI = SourceLayer->firings[k][1];
    CurDelay = t-SourceLayer->firings[k][0];
    for (j=0; j < delays_length[SourceI][CurDelay]; j++)
    {
      SynNumber = delays[SourceI][CurDelay][j];
      // calculate EffPre only once: within this loop it is always the
      // same (same presynaptic neuron, same delay)
      if (j==0) {
        TDiffPre = TotalTime - LastSpikesPre[SourceI][SynNumber];
        // (branch body reconstructed; lost in this copy)
        if (TDiffPre < PreEffLutN) EffPre = PreEfficacyLut[TDiffPre];
        else EffPre = 1;
      }
      // bookkeeping and postsynaptic efficacy (reconstructed; lost in this copy)
      SecondLastSpikesPre[SourceI][SynNumber] = LastSpikesPre[SourceI][SynNumber];
      LastSpikesPre[SourceI][SynNumber] = TotalTime;
      CurWeight = &s[SourceI][SynNumber];
      i = post[SourceI][SynNumber];
      TDiffPost = LastSpikesPost[i] - SecondLastSpikesPost[i];
      if (TDiffPost < PostEffLutN) EffPost = PostEfficacyLut[TDiffPost];
      else EffPost = 1;
      TDiff = TotalTime - LastSpikesPost[i];
      if ((TDiff > 0) && (TDiff < PreLutN)) {
        *CurWeight *= (1 + EffPre*EffPost*PreLearnWindowLut[TDiff]);
        if (*CurWeight > maxWeight) *CurWeight = maxWeight;
        if (*CurWeight < minWeight) *CurWeight = minWeight;
      }
    }
  }
  return 0;
}

LearnFroemkeDan::~LearnFroemkeDan()
{
  DeleteArray2d(LastSpikesPre, ns);
  DeleteArray2d(SecondLastSpikesPre, ns);
  delete [] LastSpikesPost;
  delete [] SecondLastSpikesPost;
  delete [] PreLearnWindowLut;
  delete [] PostLearnWindowLut;
  delete [] PreEfficacyLut;
  delete [] PostEfficacyLut;
}

////////////////////////////////////

LearnSjoestroem::LearnSjoestroem(connection* con, float _maxWeight, float _LearnRate,
                                 float _TauLTDep, float _TauLTPot, float _Amp1, float _Amp2):
  learning(con, _maxWeight), LearnRate(_LearnRate), TauLTDep(_TauLTDep), TauLTPot(_TauLTPot),
  Amp1(_Amp1), Amp2(_Amp2), LastSpikesPre(0), LastSpikesPost(0),
  LTPotLearnWindowLut(0), LTDepLearnWindowLut(0)
{
  // Initialize lookup tables for the learning window
  LTPotLearnWindowLut = ExpDecayLut(LTPotLutN, TauLTPot, LearnRate*Amp2, 0, dt, 4);
  LTDepLearnWindowLut = ExpDecayLut(LTDepLutN, TauLTDep, LearnRate*Amp1, 1, dt, 4);

  // initialize arrays for last spike times; LastSpikesPost is stored
  // per synapse here (reconstructed; the loop bodies were lost in this copy)
  NewArray2d(LastSpikesPre, ns, M);
  NewArray2d(LastSpikesPost, ns, M);
  for (int i=0; i<ns; i++)
    for (int j=0; j<M; j++) {
      LastSpikesPre[i][j] = -10000;
      LastSpikesPost[i][j] = -10000;
    }
}

int LearnSjoestroem::proceede(int TotalTime)
{
  // (head reconstructed; the original was lost in this copy)
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k, SourceI, TDiff, ConIndex;

  // update weights for each postsynaptic spike of this step
  int spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings) {
    i = TargetLayer->firings[spike++][1];
    // calculate LTP; parts of this loop were lost in this copy -- the
    // I_pre/m_pre indexing and the multiplicative update (mirroring the
    // depression branch below) are assumptions
    for (j=0; j<M; j++) {
      SourceI  = I_pre[i][j];
      ConIndex = m_pre[i][j];
      TDiff = TotalTime - LastSpikesPre[SourceI][ConIndex];
      if ((TDiff > 0) && (TDiff < LTPotLutN)) {
        *s_pre[i][j] *= (1 + LTPotLearnWindowLut[TDiff]);
        if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
        if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
      } else {
        // no LTP --> store postsynaptic spike time for potential LTD
        LastSpikesPost[SourceI][ConIndex] = TotalTime;
      }
    }
  }

  // learn all presynaptic neurons whose spikes arrive in this time step
  float* CurWeight;
  int CurDelay;
  int SynNumber;
  k = SourceLayer->N_firings;
  while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!!
  {
    SourceI = SourceLayer->firings[k][1];
    CurDelay = t-SourceLayer->firings[k][0];
    for (j=0; j < delays_length[SourceI][CurDelay]; j++)
    {
      SynNumber = delays[SourceI][CurDelay][j];
      // save last spike timing
      LastSpikesPre[SourceI][SynNumber] = TotalTime;
      CurWeight = &s[SourceI][SynNumber];
      i = post[SourceI][SynNumber];
      TDiff = TotalTime - LastSpikesPost[SourceI][SynNumber];
      if ((TDiff > 0) && (TDiff < LTDepLutN)) {
        *CurWeight *= LTDepLearnWindowLut[TDiff];
        if (*CurWeight > maxWeight) *CurWeight = maxWeight;
        if (*CurWeight < minWeight) *CurWeight = minWeight;
      }
    }
  }
  return 0;
}

LearnSjoestroem::~LearnSjoestroem()
{
  DeleteArray2d(LastSpikesPre, ns);
  DeleteArray2d(LastSpikesPost, ns);
  // DeleteArray2d(SecondLastSpikesPre, ns);
  // DeleteArray2d(SecondLastSpikesPost, ns);
  delete [] LTPotLearnWindowLut;
  delete [] LTDepLearnWindowLut;
  // delete [] PreEfficacyLut;
  // delete [] PostEfficacyLut;
}
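/* Typical wiring of one of these rules (a sketch; the surrounding
   framework objects, loop bounds and parameter values are assumptions,
   not taken from this file):

     connection* con = ...;                  // an existing connection
     learning* rule = new LearnHebbLP2(con,
                                       0.5,    // _MaxWeight
                                       20,     // TauDec
                                       0.3,    // _BaseLine
                                       0.001,  // _LearnSpeed
                                       true);  // _Accumulate
     for (int step=0; step<NSteps; ++step) {
       rule->proceede(step);                  // every simulation step
       if (step % MacroTimeStep == MacroTimeStep-1)
         rule->prepare(step);                 // once per macro time step
     }
*/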