12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345 |
- /*Copyright (C) 2005, 2006, 2007 Frank Michler, Philipps-University Marburg, Germany
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- as published by the Free Software Foundation; either version 2
- of the License, or (at your option) any later version.
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
- #include "sys.hpp" // for libcwd
- #include "debug.hpp" // for libcwd
- #include "learn.hpp"
- #include <gsl/gsl_rng.h>
- #include <gsl/gsl_randist.h>
- ////////////////////////////////
- ////////////////////////////////
- learning::learning(connection* c, float _maxWeight): SimElement(seLearning), con(c), maxWeight(_maxWeight), minWeight(0)
- {
- Name="Learning";
- cout << "Initialize Learning Object, maxWeight= " << maxWeight << "\n";
- int i,j,k;
- ConnectionInfo ConInf = con->GetConnectionInfo();
- Dmax = ConInf.Dmax;
- maximumDelay = ConInf.maximumDelay;
-
- cout << "Dmax=" << Dmax << "\n";
- TargetLayer = ConInf.TargetLayer;
- SourceLayer = ConInf.SourceLayer;
- nt = TargetLayer->N;
- ns = SourceLayer->N;
- dt = TargetLayer->GetDt();
- MacroTimeStep = TargetLayer->GetMacroTimeStep();
- cout << "Initialize Learning Object: N_Target =" << nt << "\n";
- post = ConInf.post;
- delays = ConInf.delays;
- delays_length = ConInf.delays_length;
- M = ConInf.M;
- // NewArray2d(sd,ns,M);
- // for (i=0;i<ns;++i) for (j=0;j<M;++j) sd[i][j] = 0;
- sd = ConInf.WeightDerivativePointer;
- s = ConInf.WeightPointer;
- // maxWeight = ConInf.MaxWeight;
- maxN_pre = ConInf.maxN_pre;
- // NewArray2d(sd_pre,nt,maxN_pre); // presynaptic weight derivatives
- // for (i=0;i<nt;++i) for (j=0;j<maxN_pre;++j) sd_pre[i][j] = 0;
- sd_pre = ConInf.sd_pre;
- s_pre = ConInf.s_pre;
- I_pre = ConInf.I_pre;
- N_pre = ConInf.N_pre;
- D_pre = ConInf.D_pre;
- m_pre = ConInf.m_pre;
- }
- learning::~learning()
- {
- cout << "learning Destructor\n"; fflush(stdout);
- }
- void learning::SetMinWeight(float value)
- {
- minWeight=value;
- }
- void learning::SetMaxWeight(float value)
- {
- maxWeight=value;
- }
- int learning::proceede(int t)
- {
-
- }
- int learning::prepare(int step)
- {
- }
-
- ////////////////////////////////
- levylearning::levylearning(connection* con, float _maxWeight, float lrate, float taua, float taub, float _LtpInc): learning(con, _maxWeight), LearnRate(lrate), TauA(taua), TauB(taub), LtpInc(_LtpInc)
- {
- int i, j;
- NewArray2d(LTP1,ns,1001+Dmax); // presynaptic trace (source)
- NewArray2d(LTP2,ns,1001+Dmax); // presynaptic trace (source)
- for (i=0;i<ns;i++) for (j=0;j<1001+Dmax;j++) {
- LTP1[i][j]=0.0;
- LTP2[i][j]=0.0;
- }
- decA = exp(-dt/TauA);
- decB = exp(-dt/TauB);
- }
- levylearning::~levylearning()
- {
- DeleteArray2d(LTP1,ns);
- DeleteArray2d(LTP2,ns);
- }
- int levylearning::proceede(int TotalTime)
- {
- int t = int(TotalTime % MacroTimeStep);
- int i,j,k;
- // increase learning potentials for each spike of last time step
- int spike = SourceLayer->last_N_firings;
- if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings <SourceLayer->N_firings ))
- {
- cout << "programming error, wrong firing indices \n";
- cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
- cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
- cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
- }
- // update presynaptic learning potential (for ltp)
- while (spike < SourceLayer->N_firings) {
- // update learning pot
- LTP1[SourceLayer->firings[spike][1]][t+Dmax] = LtpInc; // no accumulation!!
- LTP2[SourceLayer->firings[spike++][1]][t+Dmax] = LtpInc;
- }
- // update
- spike = TargetLayer->last_N_firings;
- while (spike < TargetLayer->N_firings)
- {
- i= TargetLayer->firings[spike++][1];
- // LTD[i]=0.12;
- // calculate LTP
- for (j=0;j<N_pre[i];j++) {
- *s_pre[i][j]+= LearnRate *((LTP1[I_pre[i][j]][t+Dmax-D_pre[i][j]-1] - LTP2[I_pre[i][j]][t+Dmax-D_pre[i][j]-1]) - *s_pre[i][j]) ;
- if (*s_pre[i][j] < 0) *s_pre[i][j]=0;
- if (*s_pre[i][j] > maxWeight) *s_pre[i][j]=maxWeight;
- }
- // this spike was after pre-synaptic spikes
- }
- // decrease potentials
- for (i=0;i<ns;++i) LTP1[i][t+Dmax+1]= LTP1[i][t+Dmax] * decA;
- for (i=0;i<ns;++i) LTP2[i][t+Dmax+1]= LTP2[i][t+Dmax] * decB;
- // for (i=0;i<nt;++i) LTD[i]*=dec_post;
-
- // rec->record(dt*TotalTime, (LTP1[0][t]-LTP2[0][t]), (LTP1[ns/2][t]-LTP2[ns/2][t]));
- }
- int levylearning::prepare(int step)
- {
- int i,j,k;
- // cout << "levylearning::prepare()\n";
- for (i=0;i<ns;i++) // prepare for the next sec
- for (j=0;j<Dmax+1;j++) {
- LTP1[i][j]=LTP1[i][1000+j];
- LTP2[i][j]=LTP2[i][1000+j];
- }
-
- // for (i=0;i<ns;i++) // modify only exc connections
- // for (j=0;j<M;j++) {
- // //if (sd[i][j] > 0.0)
- // // cout << sd[i][j] << " ";
- // // s[i][j]+= sd[i][j];
- // // if (s[i][j]>maxWeight) s[i][j]=maxWeight;
- // // if (s[i][j]<0) s[i][j]=0.0;w
- // // sd[i][j] = 0; // reset weight derivatives
- // }
-
- }
- ////////////////////////
- izhlearning::izhlearning(connection* con, float _maxWeight): learning(con, _maxWeight)
- {
- int i,j,k;
- NewArray2d(LTP,ns,1001+Dmax); // presynaptic trace (source)
- LTD = new float[nt]; // postsynaptic trace (target)
- float TAULEARN_pre=50;
- dec_pre = exp(-dt/TAULEARN_pre);
- float TAULEARN_post=5;
- dec_post = exp(-dt/TAULEARN_post);
- cout << "initialize LTP\n";
- fflush(stdout);
- for (i=0;i<ns;i++) for (j=0;j<1001+Dmax;j++) LTP[i][j]=0.0;
- cout << "initialize LTD\n"; fflush(stdout);
- for (i=0;i<nt;i++) LTD[i]=0.0;
- cout << "initialized LTD\n"; fflush(stdout);
- }
- izhlearning::~izhlearning()
- {
- DeleteArray2d(LTP,ns);
- delete[] LTD;
- }
- int izhlearning::proceede(int TotalTime)
- {
- int t = int(TotalTime % MacroTimeStep);
- int i,j,k;
- // increase learning potentials for each spike of last time step
- int spike = SourceLayer->last_N_firings;
- if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings <SourceLayer->N_firings ))
- {
- cout << "programming error, wrong firing indices \n";
- cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
- cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
- cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
- }
- // update presynaptic learning potential (for ltp)
- while (spike < SourceLayer->N_firings) LTP[SourceLayer->firings[spike++][1]][t+Dmax] += 0.12;
- // update
- spike = TargetLayer->last_N_firings;
- while (spike < TargetLayer->N_firings)
- {
- i= TargetLayer->firings[spike++][1];
- LTD[i]=0.12;
- // calculate LTP
- for (j=0;j<N_pre[i];j++) {
- *sd_pre[i][j]+=LTP[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
- // if (*sd_pre[i][j] > 0) cout << *sd_pre[i][j] << " ";
- }
- // this spike was after pre-synaptic spikes
- }
- for (i=0;i<ns;++i) LTP[i][t+Dmax+1]= LTP[i][t+Dmax] * dec_pre; //original: LTP[i][t+Dmax+1]=0.95*LTP[i][t+Dmax];
- for (i=0;i<nt;++i) LTD[i]*=dec_post;
- // calculate LTD;
- // k=SourceLayer->N_firings;
- // while (t-SourceLayer->firings[--k][0] < Dmax)
- // {
- // for (j=0; j< delays_length[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]]; j++)
- // {
- // i=post[SourceLayer->firings[k][1]] [delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
- // sd[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]]-=LTD[i]; // learning (ToDo: turn learning off/on)
- // }
- // }
- }
- int izhlearning::prepare(int step)
- {
- int i,j,k;
- cout << "learning::prepare()\n";
- for (i=0;i<ns;i++) // prepare for the next sec
- for (j=0;j<Dmax+1;j++)
- LTP[i][j]=LTP[i][1000+j];
-
- for (i=0;i<ns;i++) // modify only exc connections
- for (j=0;j<M;j++) {
- //if (sd[i][j] > 0.0)
- // cout << sd[i][j] << " ";
- sd[i][j]*=0.9;
- s[i][j]+=0.01+sd[i][j];
- if (s[i][j]>maxWeight) s[i][j]=maxWeight;
- if (s[i][j]<0) s[i][j]=0.0;
- }
- }
- ////////////////////////
- LearnRossum1::LearnRossum1(connection* con, float _Cp, float _Cd, float _SynNoiseSigma, float _maxWeight): learning(con, _maxWeight), Cp(_Cp), Cd(_Cd), SynNoiseSigma(_SynNoiseSigma)
- {
- int i,j,k;
- NewArray2d(SourceLearnPot,ns,MacroTimeStep+1+Dmax); // presynaptic trace (source)
- TargetLearnPot = new float[nt]; // postsynaptic trace (target)
- float TAULEARN_pre=20;
- dec_pre = exp(-dt/TAULEARN_pre);
- float TAULEARN_post=20;
- dec_post = exp(-dt/TAULEARN_post);
- // Cd = Cp*0.000428571;
- // Cd = 0.003;
- // SynNoiseSigma = 0.015;
- cout << "initialize SourceLearnPot\n";
- fflush(stdout);
- for (i=0;i<ns;i++) for (j=0;j<MacroTimeStep+1+Dmax;j++) SourceLearnPot[i][j]=0.0;
- cout << "initialize TargetLearnPot\n"; fflush(stdout);
- for (i=0;i<nt;i++) TargetLearnPot[i]=0.0;
- cout << "initialized TargetLearnPot\n"; fflush(stdout);
- }
- LearnRossum1::~LearnRossum1()
- {
- cout << "LearnRossum1 Destructor\n";fflush(stdout);
- DeleteArray2d(SourceLearnPot, ns);
- delete [] TargetLearnPot;
- }
/// One step of additive-LTP / multiplicative-LTD STDP with synaptic noise.
/// Presynaptic spikes bump the pre trace; a postsynaptic spike potentiates
/// all its incoming weights by (Cp + noise*weight) scaled by the pre trace;
/// each recent presynaptic spike (within maximumDelay) depresses the
/// weight it touches proportionally to the weight and the post trace.
/// @return 0 on success
int LearnRossum1::proceede(int TotalTime)
{
    int t = int(TotalTime % MacroTimeStep);
    int i, j, k;
    // consistency check on the firing ring buffer of the source layer
    int spike = SourceLayer->last_N_firings;
    if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings < SourceLayer->N_firings))
    {
        cout << "programming error, wrong firing indices \n";
        cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
        cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
        cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
    }
    // update presynaptic learning potential (for ltp)
    while (spike < SourceLayer->N_firings) SourceLearnPot[SourceLayer->firings[spike++][1]][t + Dmax] += 1.0;
    // LTP: handle every target neuron that fired in this step
    spike = TargetLayer->last_N_firings;
    while (spike < TargetLayer->N_firings)
    {
        i = TargetLayer->firings[spike++][1];
        TargetLearnPot[i] = 1;
        for (j = 0; j < N_pre[i]; j++) {
            // additive potentiation plus weight-proportional gaussian noise,
            // gated by the pre trace at spike-arrival time
            *s_pre[i][j] += (Cp + *s_pre[i][j] * gsl_ran_gaussian(gslr, SynNoiseSigma)) * SourceLearnPot[I_pre[i][j]][t + Dmax - D_pre[i][j] - 1];
        }
        // this spike was after pre-synaptic spikes
    }
    // decay both traces
    for (i = 0; i < ns; ++i) SourceLearnPot[i][t + Dmax + 1] = SourceLearnPot[i][t + Dmax] * dec_pre;
    for (i = 0; i < nt; ++i) TargetLearnPot[i] *= dec_post;
    // LTD: walk the recent presynaptic spikes (window is maximumDelay, not Dmax!)
    float* CurWeight;
    k = SourceLayer->N_firings;
    while (t - SourceLayer->firings[--k][0] < maximumDelay)
    {
        for (j = 0; j < delays_length[SourceLayer->firings[k][1]][t - SourceLayer->firings[k][0]]; j++)
        {
            CurWeight = &s[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t - SourceLayer->firings[k][0]][j]];
            i = post[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t - SourceLayer->firings[k][0]][j]];
            // multiplicative depression with gaussian synaptic noise
            *CurWeight += *CurWeight * (-Cd + gsl_ran_gaussian(gslr, SynNoiseSigma)) * TargetLearnPot[i];
            if (*CurWeight < 0) *CurWeight = 0;
        }
    }
    return 0;  // fix: missing return in int function (UB)
}
- int LearnRossum1::prepare(int step)
- {
- int i,j,k;
- for (i=0;i<ns;i++) // prepare for the next sec
- for (j=0;j<Dmax+1;j++)
- SourceLearnPot[i][j]=SourceLearnPot[i][MacroTimeStep+j];
- }
- /////////////////////////////////////////
- LearnAntiRossum1::LearnAntiRossum1(connection* con, float _Cp, float _Cd, float _SynNoiseSigma, float _maxWeight): LearnRossum1(con, _Cp, _Cd, _SynNoiseSigma, _maxWeight)
- {
- }
- // proceede as in LearnRossum1::proceede, but change LTP and TargetLearnPot
/// Like LearnRossum1::proceede but with the roles of potentiation and
/// depression exchanged: post-before-pre pairings depress (multiplicative,
/// via the pre trace), recent presynaptic spikes potentiate additively by
/// Cp scaled by the post trace.  The pre trace is set (not accumulated).
/// @return 0 on success
int LearnAntiRossum1::proceede(int TotalTime)
{
    int t = int(TotalTime % MacroTimeStep);
    int i, j, k;
    // consistency check on the firing ring buffer of the source layer
    int spike = SourceLayer->last_N_firings;
    if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings < SourceLayer->N_firings))
    {
        cout << "programming error, wrong firing indices \n";
        cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
        cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
        cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
    }
    // update presynaptic learning potential (set to 1, no accumulation)
    while (spike < SourceLayer->N_firings) SourceLearnPot[SourceLayer->firings[spike++][1]][t + Dmax] = 1.0;
    // depression on postsynaptic spikes (anti-Hebbian direction)
    spike = TargetLayer->last_N_firings;
    while (spike < TargetLayer->N_firings)
    {
        i = TargetLayer->firings[spike++][1];
        TargetLearnPot[i] = 1;
        for (j = 0; j < N_pre[i]; j++) {
            *s_pre[i][j] += *s_pre[i][j] * (-Cd + gsl_ran_gaussian(gslr, SynNoiseSigma)) * SourceLearnPot[I_pre[i][j]][t + Dmax - D_pre[i][j] - 1];
            if (*s_pre[i][j] < 0) *s_pre[i][j] = 0;
        }
        // this spike was after pre-synaptic spikes
    }
    // decay both traces
    for (i = 0; i < ns; ++i) SourceLearnPot[i][t + Dmax + 1] = SourceLearnPot[i][t + Dmax] * dec_pre;
    for (i = 0; i < nt; ++i) TargetLearnPot[i] *= dec_post;
    // potentiation for recent presynaptic spikes (window is maximumDelay, not Dmax!)
    float* CurWeight;
    k = SourceLayer->N_firings;
    while (t - SourceLayer->firings[--k][0] < maximumDelay)
    {
        for (j = 0; j < delays_length[SourceLayer->firings[k][1]][t - SourceLayer->firings[k][0]]; j++)
        {
            CurWeight = &s[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t - SourceLayer->firings[k][0]][j]];
            i = post[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t - SourceLayer->firings[k][0]][j]];
            *CurWeight += (Cp + *CurWeight * gsl_ran_gaussian(gslr, SynNoiseSigma)) * TargetLearnPot[i];
            if (*CurWeight < 0) *CurWeight = 0;
        }
    }
    return 0;  // fix: missing return in int function (UB)
}
- ///////////////////////////////////////
- LearnHebbLP2::LearnHebbLP2(connection* con, float _MaxWeight, float TauDec, float _BaseLine, float _LearnSpeed, bool _Accumulate): learning(con, _MaxWeight), BaseLine(_BaseLine), LearnSpeed(_LearnSpeed), LtpInc(1), Accumulate(_Accumulate)
- {
- LtpDecFac = exp(-dt/TauDec);
- int i,j,k;
- NewArray2d(LTP,ns,1001+Dmax); // presynaptic trace (source)
- cout << "LearnHebbLP2\n";
- cout << "LearnSpeed = " << LearnSpeed << " TauDec=" << TauDec << "\n";
- cout << "Accumulate=" << Accumulate << "\n";
- cout << "initialize LTP\n";
- fflush(stdout);
- for (i=0;i<ns;i++) for (j=0;j<1001+Dmax;j++) LTP[i][j]=0.0;
-
- }
- LearnHebbLP2::~LearnHebbLP2()
- {
- DeleteArray2d(LTP,ns);
- }
- int LearnHebbLP2::WriteSimInfo(fstream &fw)
- {
- fw << "<" << seTypeString << " id=\"" << IdNumber << "\" Type=\"" << seType << "\" Name=\"" << Name << "\"> \n";
- fw << "<LearnConnection id=\"" << con->IdNumber << "\"/> \n";
- fw << "<MaxWeight Value=\"" << maxWeight << "\"/> \n";
- fw << "<LtpDecFac Value=\"" << LtpDecFac << "\"/> \n";
- fw << "<BaseLine Value=\"" << BaseLine << "\"/> \n";
- fw << "<LearnSpeed Value=\"" << LearnSpeed << "\"/> \n";
- fw << "<Accumulate Value=\"" << Accumulate << "\"/> \n";
- fw << "<LtpInc Value=\"" << LtpInc << "\"/> \n";
- fw << "</" << seTypeString << "> \n";
- }
- int LearnHebbLP2::proceede(int TotalTime)
- {
- // cout <<"L";fflush(stdout); //remove
- int t = int(TotalTime % MacroTimeStep);
- int i,j,k;
- // increase learning potentials for each spike of last time step
- int spike = SourceLayer->last_N_firings;
- if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings <SourceLayer->N_firings ))
- {
- cout << "programming error, wrong firing indices \n";
- cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
- cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
- cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
- }
- // update presynaptic learning potential (for ltp)
- if (Accumulate) while (spike < SourceLayer->N_firings) LTP[SourceLayer->firings[spike++][1]][t+Dmax] += LtpInc;
- else while (spike < SourceLayer->N_firings) LTP[SourceLayer->firings[spike++][1]][t+Dmax] = LtpInc;
- // update
- spike = TargetLayer->last_N_firings;
- while (spike < TargetLayer->N_firings)
- {
- i= TargetLayer->firings[spike++][1];
- // calculate LTP
- for (j=0;j<N_pre[i];j++) {
- *s_pre[i][j]+=LearnSpeed*(LTP[I_pre[i][j]][t+Dmax-D_pre[i][j]-1] - BaseLine);
- if (*s_pre[i][j] > maxWeight) *s_pre[i][j]=maxWeight;
- else if (*s_pre[i][j] < minWeight) *s_pre[i][j]=minWeight;
- // if (*sd_pre[i][j] > 0) cout << *sd_pre[i][j] << " ";
- }
- }
- for (i=0;i<ns;++i) LTP[i][t+Dmax+1]= LTP[i][t+Dmax] * LtpDecFac;
- }
- int LearnHebbLP2::prepare(int step)
- {
- int i,j,k;
- for (i=0;i<ns;i++) // prepare for the next sec
- for (j=0;j<Dmax+1;j++)
- LTP[i][j]=LTP[i][MacroTimeStep+j];
- // if (Normalize) NormalizeWeights();
- // for (i=0;i<ns;i++)
- // for (j=0;j<M;j++)
- // {
- // if (s[i][j]>maxWeight) s[i][j]=maxWeight;
- // if (s[i][j]<0) s[i][j]=0.0;
- // }
-
-
- }
- //////////////////////////////////////
- LearnHebbLP2_norm::LearnHebbLP2_norm(connection* con, float _MaxWeight, float TauDec, float _BaseLine, float _LearnSpeed, float _WeightSum): learning(con, _MaxWeight), BaseLine(_BaseLine), LearnSpeed(_LearnSpeed), LtpInc(1), WeightSum(_WeightSum)
- {
- LtpDecFac = exp(-dt/TauDec);
- int i,j,k;
- NewArray2d(LTP,ns,1001+Dmax); // presynaptic trace (source)
- cout << "LearnHebbLP2_norm\n";
- cout << "LearnSpeed = " << LearnSpeed << " WeightSum=" << WeightSum << "\n";
- cout << "initialize LTP\n";
- fflush(stdout);
- for (i=0;i<ns;i++) for (j=0;j<1001+Dmax;j++) LTP[i][j]=0.0;
- if (WeightSum == 0) Normalize=false; else Normalize=true;
- if (Normalize) NormalizeWeights();
- }
- LearnHebbLP2_norm::~LearnHebbLP2_norm()
- {
- DeleteArray2d(LTP,ns);
- }
- int LearnHebbLP2_norm::proceede(int TotalTime)
- {
- int t = int(TotalTime % MacroTimeStep);
- int i,j,k;
- // increase learning potentials for each spike of last time step
- int spike = SourceLayer->last_N_firings;
- if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings <SourceLayer->N_firings ))
- {
- cout << "programming error, wrong firing indices \n";
- cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
- cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
- cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
- }
- // update presynaptic learning potential (for ltp)
- while (spike < SourceLayer->N_firings) LTP[SourceLayer->firings[spike++][1]][t+Dmax] += LtpInc;
- // update
- spike = TargetLayer->last_N_firings;
- float CurWeightSum; // normalize
- while (spike < TargetLayer->N_firings)
- {
- i= TargetLayer->firings[spike++][1];
- // calculate LTP
- CurWeightSum=0;
- for (j=0;j<N_pre[i];j++) {
- *s_pre[i][j]+=LearnSpeed*(LTP[I_pre[i][j]][t+Dmax-D_pre[i][j]-1] - BaseLine);
- CurWeightSum += *s_pre[i][j]; //Normalize
- // if (*sd_pre[i][j] > 0) cout << *sd_pre[i][j] << " ";
- }
- // normalize
- if (CurWeightSum > 0) for(j=0;j<N_pre[i];++j) (*s_pre[i][j]) *= WeightSum/CurWeightSum;
- // this spike was after pre-synaptic spikes
- }
- for (i=0;i<ns;++i) LTP[i][t+Dmax+1]= LTP[i][t+Dmax] * LtpDecFac;
- }
- int LearnHebbLP2_norm::prepare(int step)
- {
- int i,j,k;
- for (i=0;i<ns;i++) // prepare for the next sec
- for (j=0;j<Dmax+1;j++)
- LTP[i][j]=LTP[i][MacroTimeStep+j];
- // if (Normalize) NormalizeWeights();
- // for (i=0;i<ns;i++)
- // for (j=0;j<M;j++)
- // {
- // if (s[i][j]>maxWeight) s[i][j]=maxWeight;
- // if (s[i][j]<0) s[i][j]=0.0;
- // }
-
-
- }
- int LearnHebbLP2_norm::NormalizeWeights()
- {
- int i,j;
- cout << "Normalize Weights";
- for(i=0;i<nt;++i) {
- float CurWeightSum=0;
- for(j=0;j<N_pre[i];++j) CurWeightSum += *s_pre[i][j];
- // cout << "cWS=" << CurWeightSum << " WS=" << WeightSum << " ";
- if (CurWeightSum > 0) for(j=0;j<N_pre[i];++j) (*s_pre[i][j]) *= WeightSum/CurWeightSum;
- }
- }
- //////////////////////////////////////
- LearnPrePost::LearnPrePost(connection* con, float _maxWeight, float _TauLearnPre, float _TauLearnPost): learning(con, _maxWeight)
- {
- int i,j,k;
- NewArray2d(LPpre,ns,MacroTimeStep+1+Dmax); // presynaptic trace (source)
- LPpost = new float[nt]; // postsynaptic trace (target)
- dec_pre = exp(-dt/_TauLearnPre);
- dec_pre = exp(-dt/_TauLearnPost);
- cout << "initialize LPpre\n";
- fflush(stdout);
- for (i=0;i<ns;i++) for (j=0;j<MacroTimeStep+1+Dmax;j++) LPpre[i][j]=0.0;
- cout << "initialize LPpost\n"; fflush(stdout);
- for (i=0;i<nt;i++) LPpost[i]=0.0;
- cout << "initialized LPpost\n"; fflush(stdout);
- }
- LearnPrePost::~LearnPrePost()
- {
- cout << "LearnPrePost Destructor\n";fflush(stdout);
- DeleteArray2d(LPpre, ns);
- delete [] LPpost;
- }
- int LearnPrePost::proceede(int TotalTime)
- {
- }
- int LearnPrePost::prepare(int step)
- {
- int i,j,k;
- for (i=0;i<ns;i++) // prepare for the next sec
- for (j=0;j<Dmax+1;j++)
- LPpre[i][j]=LPpre[i][MacroTimeStep+j];
-
- }
- ////////////////////////////////////////////
- LearnHebbLP3::LearnHebbLP3(connection* con, float _maxWeight, float _TauLearnPre, float _TauLearnPost, float _BaseLine, float _LearnRate, bool _Accumulate): LearnPrePost(con, _maxWeight, _TauLearnPre, _TauLearnPost), LearnRate(_LearnRate), BaseLine(_LearnRate*_BaseLine), Accumulate(_Accumulate)
- {
- BaseLine = BaseLine*LearnRate;
- // see comment in LearnHebbLP3::proceede(int TotalTime)
- }
- int LearnHebbLP3::proceede(int TotalTime)
- {
- // learning rule: dw/dt = LearnRate*(LPpre-Lbaseline)*LPpost
- // in this implementation learn rate is used as LPpreInc
- // this is equivalent to the above formula if Lbaseline is adjusted correspondingly
- // Lbaseline_corr = Lbaseline*LearnRate
- // the advantage is, that there is one multiplication (with *LearnRate) less in the loop!
- // if const sum normalization is done Lbaseline shouldn't have any effect anyway.
- int t = int(TotalTime % MacroTimeStep);
- int i,j,k;
- // increase learning potentials for each spike of last time step
- int spike = SourceLayer->last_N_firings;
- if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings <SourceLayer->N_firings ))
- {
- cout << "programming error, wrong firing indices \n";
- cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
- cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
- cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
- }
- // update presynaptic learning potential (for ltp)
- while (spike < SourceLayer->N_firings) LPpre[SourceLayer->firings[spike++][1]][t+Dmax] += LearnRate;
- // update
- spike = TargetLayer->last_N_firings;
- // cout << "ltp ";fflush(stdout);
- while (spike < TargetLayer->N_firings)
- {
- i= TargetLayer->firings[spike++][1];
- LPpost[i]+=1;
- // calculate LTP
- for (j=0;j<N_pre[i];j++) {
- *s_pre[i][j] += LPpost[i]*(LPpre[I_pre[i][j]][t+Dmax-D_pre[i][j]-1]-BaseLine);
- if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
- if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
- }
- // this spike was after pre-synaptic spikes
- // update postsynaptic potential
- }
- // decreas synaptic potentials
- for (i=0;i<ns;++i) LPpre[i][t+Dmax+1]= LPpre[i][t+Dmax] * dec_pre;
- for (i=0;i<nt;++i) LPpost[i]*=dec_post;
- }
- /////////////////////////////////////////////////////////
- LearnFBInh::LearnFBInh(connection* con, float _maxWeight, float TauLearnPre, float TauLearnPost, float _Cpre, float _Cdep): LearnPrePost(con, _maxWeight, TauLearnPre, TauLearnPost), Cpre(_Cpre), Cdep(_Cdep)
- {
- cout << "LearnFBInh: MaxWeight=" << maxWeight << "\n";
- }
- // LTP: according to pre->post pairing (exp learning potential)
- // LTD: for every presynaptic spike, proportional to current weight
- int LearnFBInh::proceede(int TotalTime)
- {
- int t = int(TotalTime % MacroTimeStep);
- int i,j,k;
- // increase learning potentials for each spike of last time step
- int spike = SourceLayer->last_N_firings;
- if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings <SourceLayer->N_firings ))
- {
- cout << "programming error, wrong firing indices \n";
- cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
- cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
- cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
- }
- // update presynaptic learning potential (for ltp)
- while (spike < SourceLayer->N_firings) LPpre[SourceLayer->firings[spike++][1]][t+Dmax] = Cpre;
- // update
- spike = TargetLayer->last_N_firings;
- // cout << "ltp ";fflush(stdout);
- while (spike < TargetLayer->N_firings)
- {
- i= TargetLayer->firings[spike++][1];
- LPpost[i]=1;
- // calculate LTP
- for (j=0;j<N_pre[i];j++) {
- // *s_pre[i][j] += (Cp+ *s_pre[i][j]*gsl_ran_gaussian(gslr, SynNoiseSigma))*LPpre[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
- *s_pre[i][j] += LPpre[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
- if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
- }
- // this spike was after pre-synaptic spikes
- }
- for (i=0;i<ns;++i) LPpre[i][t+Dmax+1]= LPpre[i][t+Dmax] * dec_pre; //original: LPpre[i][t+Dmax+1]=0.95*LPpre[i][t+Dmax];
- for (i=0;i<nt;++i) LPpost[i]*=dec_post;
- // calculate LTD;
- float* CurWeight;
- k=SourceLayer->N_firings;
- while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!!
- {
- int SourceI=SourceLayer->firings[k][1];
- for (j=0; j< delays_length[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]]; j++)
- {
- CurWeight = &s[SourceLayer->firings[k][1]] [delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
- i=post[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
- // *CurWeight -= *CurWeight*Cd*LPpost[i];
- // float DeltaW = *CurWeight*(-Cd +gsl_ran_gaussian(gslr, SynNoiseSigma))* LPpost[i];
- // cout << DeltaW << " " << LPpost[i] << "|"; fflush(stdout);
- *CurWeight -= *CurWeight*Cdep; //*(1-LPpost[i]);
- if (*CurWeight < 0) *CurWeight=0;
- // s[SourceLayer->firings[k][1]] [delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]] -= LPpost[i]; // learning (ToDo: turn learning off/on)
- // cout << "after j=" << j << "i=" << i << "CW=" << *CurWeight << "\n";fflush(stdout);
- }
- }
- // cout << "fin "; fflush(stdout);
- }
- //////////////////////////////////////////////
- LearnFWInh::LearnFWInh(connection* con, float _maxWeight, float TauLearnPre, float TauLearnPost, float _Cpre, float _Cdep): LearnPrePost(con, _maxWeight, TauLearnPre, TauLearnPost), Cpre(_Cpre), Cdep(_Cdep)
- {
- cout << "LearnFWInh: MaxWeight=" << maxWeight << "\n";
- }
- // LTD: according to pre->post pairing (exp learning potential), proportional to current weight
- // LTP: for every postsynaptic spike
- int LearnFWInh::proceede(int TotalTime)
- {
- int t = int(TotalTime % MacroTimeStep);
- int i,j,k;
- // increase learning potentials for each spike of last time step
- int spike = SourceLayer->last_N_firings;
- if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings <SourceLayer->N_firings ))
- {
- cout << "programming error, wrong firing indices \n";
- cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
- cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
- cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
- }
- // update presynaptic learning potential (for ltp)
- while (spike < SourceLayer->N_firings) LPpre[SourceLayer->firings[spike++][1]][t+Dmax] = Cpre;
- // update
- spike = TargetLayer->last_N_firings;
- while (spike < TargetLayer->N_firings)
- {
- i= TargetLayer->firings[spike++][1];
- LPpost[i]=1;
- // calculate LTP
- for (j=0;j<N_pre[i];j++) {
- *s_pre[i][j] -= *s_pre[i][j]*Cdep*LPpre[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
- if (*s_pre[i][j] < 0) *s_pre[i][j] = 0;
- }
- // this spike was after pre-synaptic spikes
- }
- for (i=0;i<ns;++i) LPpre[i][t+Dmax+1]= LPpre[i][t+Dmax] * dec_pre; //original: LPpre[i][t+Dmax+1]=0.95*LPpre[i][t+Dmax];
- for (i=0;i<nt;++i) LPpost[i]*=dec_post;
- // calculate LTD;
- float* CurWeight;
- k=SourceLayer->N_firings;
- while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!!
- {
- int SourceI=SourceLayer->firings[k][1];
- for (j=0; j< delays_length[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]]; j++)
- {
- CurWeight = &s[SourceLayer->firings[k][1]] [delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
- i=post[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
- *CurWeight += *CurWeight*Cpre;
- if (*CurWeight < 0) *CurWeight=0;
- }
- }
- }
- /////////////////////////////////////////////////////////
- LearnAntiHebb::LearnAntiHebb(connection* con, float _maxWeight, float TauLearnPre, float TauLearnPost, float _Cpre, float _Cdep): LearnPrePost(con, _maxWeight, TauLearnPre, TauLearnPost), Cpre(_Cpre), Cdep(_Cdep)
- {
- cout << "LearnAntiHebb: MaxWeight=" << maxWeight << "\n";
- }
- // Dummy// Dummy// Dummy// Dummy// Dummy// Dummy// Dummy
- // this object is not in use yet (use Learnhebblp2 instead)
- int LearnAntiHebb::proceede(int TotalTime)
- {
- int t = int(TotalTime % MacroTimeStep);
- int i,j,k;
- // increase learning potentials for each spike of last time step
- int spike = SourceLayer->last_N_firings;
- if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings <SourceLayer->N_firings ))
- {
- cout << "programming error, wrong firing indices \n";
- cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
- cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
- cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
- }
- // update presynaptic learning potential (for ltp)
- while (spike < SourceLayer->N_firings) LPpre[SourceLayer->firings[spike++][1]][t+Dmax] = 1;
- // update
- spike = TargetLayer->last_N_firings;
- // cout << "ltp ";fflush(stdout);
- while (spike < TargetLayer->N_firings)
- {
- i= TargetLayer->firings[spike++][1];
- LPpost[i]=1;
- // calculate LTP
- for (j=0;j<N_pre[i];j++) {
- // *s_pre[i][j] += (Cp+ *s_pre[i][j]*gsl_ran_gaussian(gslr, SynNoiseSigma))*LPpre[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
- *s_pre[i][j] += Cpre*(1-LPpre[I_pre[i][j]][t+Dmax-D_pre[i][j]-1]);
- if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
- }
- // this spike was after pre-synaptic spikes
- }
- for (i=0;i<ns;++i) LPpre[i][t+Dmax+1]= LPpre[i][t+Dmax] * dec_pre; //original: LPpre[i][t+Dmax+1]=0.95*LPpre[i][t+Dmax];
- for (i=0;i<nt;++i) LPpost[i]*=dec_post;
- // calculate LTD;
- float* CurWeight;
- k=SourceLayer->N_firings;
- while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!!
- {
- int SourceI=SourceLayer->firings[k][1];
- for (j=0; j< delays_length[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]]; j++)
- {
- CurWeight = &s[SourceLayer->firings[k][1]] [delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
- i=post[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
- // *CurWeight -= *CurWeight*Cd*LPpost[i];
- // float DeltaW = *CurWeight*(-Cd +gsl_ran_gaussian(gslr, SynNoiseSigma))* LPpost[i];
- // cout << DeltaW << " " << LPpost[i] << "|"; fflush(stdout);
- *CurWeight -=Cdep*(1-LPpost[i]);
- if (*CurWeight < 0) *CurWeight=0;
- // s[SourceLayer->firings[k][1]] [delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]] -= LPpost[i]; // learning (ToDo: turn learning off/on)
- // cout << "after j=" << j << "i=" << i << "CW=" << *CurWeight << "\n";fflush(stdout);
- }
- }
- // cout << "fin "; fflush(stdout);
- }
- //////////////////////////LearnBiPoo
- LearnBiPoo::LearnBiPoo(connection* con, float _maxWeight, float _LearnRate, float _TauPeak1, float _TauPeak2, float _Amp1, float _Amp2)
- : learning(con, _maxWeight), LearnRate(_LearnRate),
- TauPeak1(_TauPeak1), TauPeak2(_TauPeak2), Amp1(_Amp1), Amp2(_Amp2),
- LastSpikesPre(0), LastSpikesPost(0),
- PreLearnWindowLut(0),
- PostLearnWindowLut(0)
- {
- // Initialize lookup tables for learning window
- PreLearnWindowLut = AlphaFktLut(PreLutN, TauPeak1, LearnRate*Amp1, 0, dt);
- PostLearnWindowLut = AlphaFktLut(PostLutN, TauPeak2, LearnRate*Amp2, 0, dt);
- // initialize arrays for last spikes
- NewArray2d(LastSpikesPre, ns, M);
- for (int i=0; i<ns; ++i) for (int j=0;j<M;++j) {
- LastSpikesPre[i][j]=int(-PreLutN);
- }
- LastSpikesPost = new int [nt];
- for (int i=0;i<nt;++i) LastSpikesPost[i] = int(-PostLutN);
- }
- int LearnBiPoo::prepare(int step)
- {
- }
- int LearnBiPoo::proceede(int TotalTime)
- {
- int t = int(TotalTime % MacroTimeStep);
- int i,j,k, TDiff;
- int spike;
- int SourceI;
- // learn all postsynaptic neurons (target layer) that fired last time step
- spike = TargetLayer->last_N_firings;
- while (spike < TargetLayer->N_firings)
- {
- i= TargetLayer->firings[spike++][1];
- LastSpikesPost[i] = TotalTime;
- // calculate LTP
- for (j=0;j<N_pre[i];j++) {
- TDiff = TotalTime - LastSpikesPre[I_pre[i][j]][m_pre[i][j]];
- if ((TDiff>0) && (TDiff<PostLutN)) {
- *s_pre[i][j] += PostLearnWindowLut[TDiff];
- if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
- if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
- }
- }
- }
-
-
- // learn all presynaptic neurons with this time step arriving spikes
- float* CurWeight;
- int CurDelay;
- int PostI;
- int SynNumber;
- k=SourceLayer->N_firings;
- while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!!
- {
- SourceI=SourceLayer->firings[k][1];
- CurDelay=t-SourceLayer->firings[k][0];
-
- for (j=0; j< delays_length[SourceI][CurDelay]; j++)
- {
- SynNumber = delays[SourceI][CurDelay][j];
- LastSpikesPre[SourceI][SynNumber] = TotalTime;
- CurWeight = &s[SourceI][SynNumber];
- i=post[SourceI][SynNumber];
- TDiff = TotalTime - LastSpikesPost[i];
- if ((TDiff > 0) && (TDiff < PreLutN)) {
- *CurWeight += PreLearnWindowLut[TDiff];
- if (*CurWeight > maxWeight) *CurWeight = maxWeight;
- if (*CurWeight < minWeight) *CurWeight = minWeight;
- }
- }
- }
- }
- LearnBiPoo::~LearnBiPoo()
- {
- DeleteArray2d(LastSpikesPre, ns);
- delete [] LastSpikesPost;
- delete [] PreLearnWindowLut;
- delete [] PostLearnWindowLut;
- }
- ////////////////////LearnFroemkeDan
-
- LearnFroemkeDan::LearnFroemkeDan(
- connection* con, float _maxWeight, float _LearnRate,
- float _TauPreEff, float _TauPostEff,
- float _TauPeak1, float _TauPeak2, float _Amp1, float _Amp2)
- : learning(con, _maxWeight), LearnRate(_LearnRate),
- TauPreEff(_TauPreEff), TauPostEff(_TauPostEff),
- TauPeak1(_TauPeak1), TauPeak2(_TauPeak2), Amp1(_Amp1), Amp2(_Amp2),
- LastSpikesPre(0), LastSpikesPost(0),
- SecondLastSpikesPre(0), SecondLastSpikesPost(0),
- PreLearnWindowLut(0),
- PostLearnWindowLut(0),
- PreEfficacyLut(0), PostEfficacyLut(0)
- {
- // Initialize lookup tables for learning window
- // PreLearnWindowLut = AlphaFktLut(PreLutN, TauPeak1, LearnRate*Amp1, 0, dt);
- // PostLearnWindowLut = AlphaFktLut(PostLutN, TauPeak2, LearnRate*Amp2, 0, dt);
- PreLearnWindowLut = ExpDecayLut(PreLutN, TauPeak1, LearnRate*Amp1, 0, dt);
- PostLearnWindowLut = ExpDecayLut(PostLutN, TauPeak2, LearnRate*Amp2, 0, dt);
- // initialize synaptic efficacy lookup tables
- float PreEffLutBound = 5*TauPreEff;
- float PostEffLutBound = 5*TauPostEff;
- PreEffLutN = int(PreEffLutBound/dt);
- PostEffLutN = int(PostEffLutBound/dt);
- PreEfficacyLut = new float [PreEffLutN];
- PostEfficacyLut = new float [PostEffLutN];
- float t;
- for (int n=0;n<PreEffLutN;++n) {
- t = n*dt;
- PreEfficacyLut[n] = 1-exp(-t/TauPreEff);
- }
- for (int n=0;n<PostEffLutN;++n) {
- t = n*dt;
- PostEfficacyLut[n] = 1-exp(-t/TauPostEff);
- }
- // initialize arrays for last spikes
- NewArray2d(LastSpikesPre, ns, M);
- for (int i=0; i<ns; ++i) for (int j=0;j<M;++j) LastSpikesPre[i][j]=int(-PreLutN);
- LastSpikesPost = new int [nt];
- for (int i=0;i<nt;++i) LastSpikesPost[i] = int(-PostLutN);
- // initialize arrays for second last spikes
- NewArray2d(SecondLastSpikesPre, ns, M);
- for (int i=0; i<ns; ++i) for (int j=0;j<M;++j) SecondLastSpikesPre[i][j]=int(-PreLutN-1);
- SecondLastSpikesPost = new int [nt];
- for (int i=0;i<nt;++i) SecondLastSpikesPost[i] = int(-PostLutN-1);
- }
- int LearnFroemkeDan::prepare(int step)
- {
- }
- int LearnFroemkeDan::proceede(int TotalTime)
- {
- int t = int(TotalTime % MacroTimeStep);
- int i,j,k, TDiff, TDiffPre, TDiffPost;
- int spike;
- float EffPre, EffPost;
- int SourceI;
- int ConIndex;
- // learn all postsynaptic neurons (target layer) that fired last time step
- spike = TargetLayer->last_N_firings;
- while (spike < TargetLayer->N_firings)
- {
- i= TargetLayer->firings[spike++][1];
- SecondLastSpikesPost[i] = LastSpikesPost[i];
- LastSpikesPost[i] = TotalTime;
- TDiffPost = TotalTime - SecondLastSpikesPost[i];
- if (TDiffPost < PostEffLutN) {
- EffPost = PostEfficacyLut[TDiffPost];
- } else {
- EffPost =1;
- }
- // calculate LTP
- for (j=0;j<N_pre[i];j++) {
- SourceI = I_pre[i][j];
- ConIndex= m_pre[i][j];
- TDiff = TotalTime - LastSpikesPre[SourceI][ConIndex];
- TDiffPre = LastSpikesPre[SourceI][ConIndex] - SecondLastSpikesPre[SourceI][ConIndex];
- if (TDiffPre<PreEffLutN) {
- EffPre=PreEfficacyLut[TDiffPre];
- } else {
- EffPre=1;
- }
- if ((TDiff>0) && (TDiff<PostLutN)) {
- *s_pre[i][j] *= (1+ EffPost*EffPre*PostLearnWindowLut[TDiff]);
- }
- if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
- if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
- }
- }
-
-
- // learn all presynaptic neurons with this time step arriving spikes
- float* CurWeight;
- int CurDelay;
- int SynNumber;
- k=SourceLayer->N_firings;
- while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!!
- {
- SourceI=SourceLayer->firings[k][1];
- CurDelay=t-SourceLayer->firings[k][0];
-
- for (j=0; j< delays_length[SourceI][CurDelay]; j++)
- {
- // cout << "before j=" << j << "i=" << i << "CW=" << *CurWeight << "k=" << k << "\n";fflush(stdout);
- SynNumber = delays[SourceI][CurDelay][j];
-
- // calculate EffPre only once because in this loop it is allways the same
- // (same presynaptic neuron, same delay)
- if (j==0) {
- TDiffPre = TotalTime - LastSpikesPre[SourceI][SynNumber];
- if (TDiffPre<PreEffLutN) {
- EffPre = PreEfficacyLut[TDiffPre];
- } else {
- EffPre = 1;
- }
- }
- // save last spike timings
- SecondLastSpikesPre[SourceI][SynNumber] = LastSpikesPre[SourceI][SynNumber];
- LastSpikesPre[SourceI][SynNumber] = TotalTime;
- CurWeight = &s[SourceI][SynNumber];
- // // cout << "hereithappens"; fflush(stdout);
- i=post[SourceI][SynNumber];
- TDiff = TotalTime - LastSpikesPost[i];
- TDiffPost = LastSpikesPost[i] - SecondLastSpikesPost[i];
- if (TDiffPost < PostEffLutN) {
- EffPost = PostEfficacyLut[TDiffPost];
- } else {
- EffPost = 1;
- }
- if ((TDiff > 0) && (TDiff < PreLutN)) {
- *CurWeight *= (1+EffPre*EffPost*PreLearnWindowLut[TDiff]);
- if (*CurWeight > maxWeight) *CurWeight = maxWeight;
- if (*CurWeight < minWeight) *CurWeight = minWeight;
- }
- }
- }
- }
- LearnFroemkeDan::~LearnFroemkeDan()
- {
- DeleteArray2d(LastSpikesPre, ns);
- DeleteArray2d(SecondLastSpikesPre, ns);
- delete [] LastSpikesPost;
- delete [] SecondLastSpikesPost;
- delete [] PreLearnWindowLut;
- delete [] PostLearnWindowLut;
- delete [] PreEfficacyLut;
- delete [] PostEfficacyLut;
- }
- ////////////////////////////////////
- LearnSjoestroem::LearnSjoestroem(
- connection* con, float _maxWeight, float _LearnRate,
- float _TauLTDep, float _TauLTPot, float _Amp1, float _Amp2)
- : learning(con, _maxWeight), LearnRate(_LearnRate),
- TauLTDep(_TauLTDep), TauLTPot(_TauLTPot), Amp1(_Amp1), Amp2(_Amp2),
- LastSpikesPre(0), LastSpikesPost(0),
- LTPotLearnWindowLut(0),
- LTDepLearnWindowLut(0)
- {
- // Initialize lookup tables for learning window
- LTPotLearnWindowLut = ExpDecayLut(LTPotLutN, TauLTPot, LearnRate*Amp2, 0, dt, 4);
- LTDepLearnWindowLut = ExpDecayLut(LTDepLutN, TauLTDep, LearnRate*Amp1, 1, dt, 4);
- // initialize arrays for last spikes
- NewArray2d(LastSpikesPre, ns, M);
- for (int i=0; i<ns; ++i) for (int j=0;j<M;++j) LastSpikesPre[i][j]=int(-LTPotLutN);
- NewArray2d(LastSpikesPost, ns, M);
- for (int i=0; i<ns; ++i) for (int j=0;j<M;++j) LastSpikesPost[i][j] = int(-LTDepLutN);
- }
- int LearnSjoestroem::prepare(int step)
- {
- }
- int LearnSjoestroem::proceede(int TotalTime)
- {
- int t = int(TotalTime % MacroTimeStep);
- int i,j,k, TDiff;
- int spike;
- int SourceI;
- int ConIndex;
- // learn all postsynaptic neurons (target layer) that fired last time step
- spike = TargetLayer->last_N_firings;
- while (spike < TargetLayer->N_firings)
- {
- i= TargetLayer->firings[spike++][1];
- // calculate LTP
- for (j=0;j<N_pre[i];j++) {
- SourceI = I_pre[i][j];
- ConIndex= m_pre[i][j];
- TDiff = TotalTime - LastSpikesPre[SourceI][ConIndex];
- if ((TDiff>0) && (TDiff<LTPotLutN)) { // LTP
- *s_pre[i][j] += LTPotLearnWindowLut[TDiff];
- // not storing postsynaptic spike time because LTP "wins" over LTD
- if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
- if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
- } else { // no LTP --> store postsynaptic spike time for potential LTP
- LastSpikesPost[SourceI][ConIndex] = TotalTime;
- }
- }
- }
-
-
- // learn all presynaptic neurons with this time step arriving spikes
- float* CurWeight;
- int CurDelay;
- int SynNumber;
- k=SourceLayer->N_firings;
- while (t-SourceLayer->firings[--k][0] < maximumDelay) // not DMax!!!
- {
- SourceI=SourceLayer->firings[k][1];
- CurDelay=t-SourceLayer->firings[k][0];
-
- for (j=0; j< delays_length[SourceI][CurDelay]; j++)
- {
- // cout << "before j=" << j << "i=" << i << "CW=" << *CurWeight << "k=" << k << "\n";fflush(stdout);
- SynNumber = delays[SourceI][CurDelay][j];
-
- // save last spike timings
- LastSpikesPre[SourceI][SynNumber] = TotalTime;
- CurWeight = &s[SourceI][SynNumber];
- // // cout << "hereithappens"; fflush(stdout);
- i=post[SourceI][SynNumber];
- TDiff = TotalTime - LastSpikesPost[SourceI][SynNumber];
- if ((TDiff > 0) && (TDiff < LTDepLutN)) {
- *CurWeight *= LTDepLearnWindowLut[TDiff];
- if (*CurWeight > maxWeight) *CurWeight = maxWeight;
- if (*CurWeight < minWeight) *CurWeight = minWeight;
- }
- }
- }
- }
- LearnSjoestroem::~LearnSjoestroem()
- {
- DeleteArray2d(LastSpikesPre, ns);
- DeleteArray2d(LastSpikesPost, ns);
- // DeleteArray2d(SecondLastSpikesPre, ns);
- // DeleteArray2d(SecondLastSpikesPost, ns);
- delete [] LTPotLearnWindowLut;
- delete [] LTDepLearnWindowLut;
- // delete [] PreEfficacyLut;
- // delete [] PostEfficacyLut;
- }
|