/* Copyright (C) 2005, 2006, 2007 Frank Michler, Philipps-University Marburg, Germany

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "sys.hpp"   // for libcwd
#include "debug.hpp" // for libcwd
#include "learn.hpp"
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>

////////////////////////////////
////////////////////////////////
learning::learning(connection* c, float _maxWeight)
  : SimElement(seLearning), con(c), maxWeight(_maxWeight), minWeight(0)
{
  Name = "Learning";
  cout << "Initialize Learning Object, maxWeight=" << maxWeight << "\n";
  int i, j, k;
  ConnectionInfo ConInf = con->GetConnectionInfo();
  Dmax = ConInf.Dmax;
  maximumDelay = ConInf.maximumDelay;
  cout << "Dmax=" << Dmax << "\n";
  TargetLayer = ConInf.TargetLayer;
  SourceLayer = ConInf.SourceLayer;
  nt = TargetLayer->N;
  ns = SourceLayer->N;
  dt = TargetLayer->GetDt();
  MacroTimeStep = TargetLayer->GetMacroTimeStep();
  cout << "Initialize Learning Object: N_Target=" << nt << "\n";
  post = ConInf.post;
  delays = ConInf.delays;
  delays_length = ConInf.delays_length;
  M = ConInf.M;
  // NewArray2d(sd,ns,M);
  // for (i=0;i<ns;++i) for (j=0;j<M;++j) sd[i][j] = 0;
  sd = ConInf.WeightDerivativePointer;
  s = ConInf.WeightPointer;
  // maxWeight = ConInf.MaxWeight;
  maxN_pre = ConInf.maxN_pre;
  // NewArray2d(sd_pre,nt,maxN_pre); // presynaptic weight derivatives
  // for (i=0;i<nt;++i) for (j=0;j<maxN_pre;++j) sd_pre[i][j] = 0;
  sd_pre = ConInf.sd_pre;
  s_pre = ConInf.s_pre;
  I_pre = ConInf.I_pre;
  N_pre = ConInf.N_pre;
  D_pre = ConInf.D_pre;
  m_pre = ConInf.m_pre;
}

learning::~learning()
{
  cout << "learning Destructor\n"; fflush(stdout);
}

void learning::SetMinWeight(float value)
{
  minWeight = value;
}

void learning::SetMaxWeight(float value)
{
  maxWeight = value;
}

int learning::proceede(int t)
{
  return 0; // base class: nothing to do
}

int learning::prepare(int step)
{
  return 0; // base class: nothing to do
}
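
// Editor's note on the shared state wired up in the constructor above (as
// exposed by ConnectionInfo): s and sd point to the weight matrix and its
// derivative buffer indexed [source][synapse], while s_pre, sd_pre, I_pre,
// D_pre and m_pre are per-target views of the same synapses (weight pointer,
// source index, conduction delay, synapse index). The rules below can thus
// walk a connection either from its presynaptic or its postsynaptic side.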
////////////////////////////////
levylearning::levylearning(connection* con, float _maxWeight, float lrate, float taua, float taub, float _LtpInc)
  : learning(con, _maxWeight), LearnRate(lrate), TauA(taua), TauB(taub), LtpInc(_LtpInc)
{
  int i, j;
  NewArray2d(LTP1, ns, 1001+Dmax); // presynaptic trace (source)
  NewArray2d(LTP2, ns, 1001+Dmax); // presynaptic trace (source)
  for (i=0; i<ns; i++) for (j=0; j<1001+Dmax; j++) {
    LTP1[i][j] = 0.0;
    LTP2[i][j] = 0.0;
  }
  decA = exp(-dt/TauA);
  decB = exp(-dt/TauB);
}

levylearning::~levylearning()
{
  DeleteArray2d(LTP1, ns);
  DeleteArray2d(LTP2, ns);
}
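
// Editor's sketch of the rule implemented in proceede() below: each presynaptic
// spike resets two traces (fast decay TauA, slow decay TauB); on a postsynaptic
// spike every incoming weight relaxes toward the difference of the two traces,
// sampled at the delay-corrected arrival time:
//   w += LearnRate * ((LTP1 - LTP2) - w)
// so the equilibrium weight follows a difference-of-exponentials learning
// window, clipped to [0, maxWeight].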
int levylearning::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;
  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings < SourceLayer->N_firings))
  {
    cout << "programming error, wrong firing indices\n";
    cout << "t=" << t << " SourceLayer->firings[spike][0]=" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }
  // update presynaptic learning potentials (for LTP)
  while (spike < SourceLayer->N_firings) {
    LTP1[SourceLayer->firings[spike][1]][t+Dmax] = LtpInc; // no accumulation!
    LTP2[SourceLayer->firings[spike++][1]][t+Dmax] = LtpInc;
  }
  // update weights of all target neurons that fired this time step
  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings)
  {
    i = TargetLayer->firings[spike++][1];
    // calculate LTP
    for (j=0; j<N_pre[i]; j++) {
      *s_pre[i][j] += LearnRate * ((LTP1[I_pre[i][j]][t+Dmax-D_pre[i][j]-1] - LTP2[I_pre[i][j]][t+Dmax-D_pre[i][j]-1]) - *s_pre[i][j]);
      if (*s_pre[i][j] < 0) *s_pre[i][j] = 0;
      if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
    }
    // this spike was after pre-synaptic spikes
  }
  // decay learning potentials
  for (i=0; i<ns; ++i) LTP1[i][t+Dmax+1] = LTP1[i][t+Dmax] * decA;
  for (i=0; i<ns; ++i) LTP2[i][t+Dmax+1] = LTP2[i][t+Dmax] * decB;
  // rec->record(dt*TotalTime, (LTP1[0][t]-LTP2[0][t]), (LTP1[ns/2][t]-LTP2[ns/2][t]));
  return 0;
}
int levylearning::prepare(int step)
{
  int i, j, k;
  // cout << "levylearning::prepare()\n";
  for (i=0; i<ns; i++)        // prepare for the next second
    for (j=0; j<Dmax+1; j++) {
      LTP1[i][j] = LTP1[i][1000+j];
      LTP2[i][j] = LTP2[i][1000+j];
    }
  // for (i=0;i<ns;i++) // modify only exc connections
  //   for (j=0;j<M;j++) {
  //     s[i][j] += sd[i][j];
  //     if (s[i][j] > maxWeight) s[i][j] = maxWeight;
  //     if (s[i][j] < 0) s[i][j] = 0.0;
  //     sd[i][j] = 0; // reset weight derivatives
  //   }
  return 0;
}
////////////////////////
izhlearning::izhlearning(connection* con, float _maxWeight)
  : learning(con, _maxWeight)
{
  int i, j, k;
  NewArray2d(LTP, ns, 1001+Dmax); // presynaptic trace (source)
  LTD = new float[nt];            // postsynaptic trace (target)
  float TAULEARN_pre = 50;
  dec_pre = exp(-dt/TAULEARN_pre);
  float TAULEARN_post = 5;
  dec_post = exp(-dt/TAULEARN_post);
  cout << "initialize LTP\n"; fflush(stdout);
  for (i=0; i<ns; i++) for (j=0; j<1001+Dmax; j++) LTP[i][j] = 0.0;
  cout << "initialize LTD\n"; fflush(stdout);
  for (i=0; i<nt; i++) LTD[i] = 0.0;
  cout << "initialized LTD\n"; fflush(stdout);
}

izhlearning::~izhlearning()
{
  DeleteArray2d(LTP, ns);
  delete[] LTD;
}
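
// Editor's sketch: this looks like the trace-based STDP scheme of Izhikevich's
// spnet (assuming that lineage; the constants 0.12, tau_pre=50 ms, tau_post=5 ms
// match it). A presynaptic spike bumps LTP[pre] by 0.12, a postsynaptic spike
// sets LTD[post]=0.12, and both traces decay exponentially. Pairings only
// accumulate in the derivative buffer sd_pre here; the weights themselves are
// consolidated once per MacroTimeStep in prepare().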
int izhlearning::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;
  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings < SourceLayer->N_firings))
  {
    cout << "programming error, wrong firing indices\n";
    cout << "t=" << t << " SourceLayer->firings[spike][0]=" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }
  // update presynaptic learning potential (for LTP)
  while (spike < SourceLayer->N_firings) LTP[SourceLayer->firings[spike++][1]][t+Dmax] += 0.12;
  // update derivatives of all target neurons that fired this time step
  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings)
  {
    i = TargetLayer->firings[spike++][1];
    LTD[i] = 0.12;
    // calculate LTP
    for (j=0; j<N_pre[i]; j++) {
      *sd_pre[i][j] += LTP[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
    }
    // this spike was after pre-synaptic spikes
  }
  for (i=0; i<ns; ++i) LTP[i][t+Dmax+1] = LTP[i][t+Dmax] * dec_pre; // original: LTP[i][t+Dmax+1]=0.95*LTP[i][t+Dmax];
  for (i=0; i<nt; ++i) LTD[i] *= dec_post;
  // calculate LTD (currently disabled):
  // k=SourceLayer->N_firings;
  // while (t-SourceLayer->firings[--k][0] < Dmax)
  // {
  //   for (j=0; j<delays_length[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]]; j++)
  //   {
  //     i = post[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
  //     sd[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]] -= LTD[i]; // learning (ToDo: turn learning off/on)
  //   }
  // }
  return 0;
}
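
// Editor's note on prepare() below: once per MacroTimeStep every weight is
// nudged by 0.01 plus its accumulated derivative, and the derivative itself
// decays (sd *= 0.9), so updates are low-pass filtered rather than applied
// instantaneously. The same consolidation step appears in Izhikevich's
// published spnet code, which this class appears to follow.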
int izhlearning::prepare(int step)
{
  int i, j, k;
  cout << "learning::prepare()\n";
  for (i=0; i<ns; i++)        // prepare for the next second
    for (j=0; j<Dmax+1; j++)
      LTP[i][j] = LTP[i][1000+j];
  for (i=0; i<ns; i++)        // modify only exc connections
    for (j=0; j<M; j++) {
      sd[i][j] *= 0.9;
      s[i][j] += 0.01 + sd[i][j];
      if (s[i][j] > maxWeight) s[i][j] = maxWeight;
      if (s[i][j] < 0) s[i][j] = 0.0;
    }
  return 0;
}
////////////////////////
LearnRossum1::LearnRossum1(connection* con, float _Cp, float _Cd, float _SynNoiseSigma, float _maxWeight)
  : learning(con, _maxWeight), Cp(_Cp), Cd(_Cd), SynNoiseSigma(_SynNoiseSigma)
{
  int i, j, k;
  NewArray2d(SourceLearnPot, ns, MacroTimeStep+1+Dmax); // presynaptic trace (source)
  TargetLearnPot = new float[nt];                       // postsynaptic trace (target)
  float TAULEARN_pre = 20;
  dec_pre = exp(-dt/TAULEARN_pre);
  float TAULEARN_post = 20;
  dec_post = exp(-dt/TAULEARN_post);
  // Cd = Cp*0.000428571;
  // Cd = 0.003;
  // SynNoiseSigma = 0.015;
  cout << "initialize SourceLearnPot\n"; fflush(stdout);
  for (i=0; i<ns; i++) for (j=0; j<MacroTimeStep+1+Dmax; j++) SourceLearnPot[i][j] = 0.0;
  cout << "initialize TargetLearnPot\n"; fflush(stdout);
  for (i=0; i<nt; i++) TargetLearnPot[i] = 0.0;
  cout << "initialized TargetLearnPot\n"; fflush(stdout);
}

LearnRossum1::~LearnRossum1()
{
  cout << "LearnRossum1 Destructor\n"; fflush(stdout);
  DeleteArray2d(SourceLearnPot, ns);
  delete[] TargetLearnPot;
}
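
// Editor's sketch: mixed additive/multiplicative STDP with multiplicative
// synaptic noise, in the spirit of van Rossum et al. (2000), to which the
// class name points:
//   pre-before-post (LTP): w += (Cp + w*xi) * pre_trace,  xi ~ N(0, SynNoiseSigma)
//   post-before-pre (LTD): w += w * (-Cd + xi) * post_trace
// Additive potentiation combined with weight-proportional depression is what
// gives this rule its stable, unimodal weight distribution in the original paper.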
int LearnRossum1::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;
  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings < SourceLayer->N_firings))
  {
    cout << "programming error, wrong firing indices\n";
    cout << "t=" << t << " SourceLayer->firings[spike][0]=" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }
  // update presynaptic learning potential (for LTP)
  while (spike < SourceLayer->N_firings) SourceLearnPot[SourceLayer->firings[spike++][1]][t+Dmax] += 1.0;
  // additive LTP for all target neurons that fired this time step
  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings)
  {
    i = TargetLayer->firings[spike++][1];
    TargetLearnPot[i] = 1;
    // calculate LTP
    for (j=0; j<N_pre[i]; j++) {
      *s_pre[i][j] += (Cp + *s_pre[i][j]*gsl_ran_gaussian(gslr, SynNoiseSigma)) * SourceLearnPot[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
    }
    // this spike was after pre-synaptic spikes
  }
  for (i=0; i<ns; ++i) SourceLearnPot[i][t+Dmax+1] = SourceLearnPot[i][t+Dmax] * dec_pre;
  for (i=0; i<nt; ++i) TargetLearnPot[i] *= dec_post;
  // calculate LTD: multiplicative depression for every arriving presynaptic spike
  float* CurWeight;
  k = SourceLayer->N_firings;
  while (t - SourceLayer->firings[--k][0] < maximumDelay) // not Dmax!
  {
    int SourceI = SourceLayer->firings[k][1];
    for (j=0; j<delays_length[SourceI][t-SourceLayer->firings[k][0]]; j++)
    {
      CurWeight = &s[SourceI][delays[SourceI][t-SourceLayer->firings[k][0]][j]];
      i = post[SourceI][delays[SourceI][t-SourceLayer->firings[k][0]][j]];
      *CurWeight += *CurWeight * (-Cd + gsl_ran_gaussian(gslr, SynNoiseSigma)) * TargetLearnPot[i];
      if (*CurWeight < 0) *CurWeight = 0;
    }
  }
  return 0;
}
int LearnRossum1::prepare(int step)
{
  int i, j, k;
  for (i=0; i<ns; i++)        // prepare for the next second
    for (j=0; j<Dmax+1; j++)
      SourceLearnPot[i][j] = SourceLearnPot[i][MacroTimeStep+j];
  return 0;
}
/////////////////////////////////////////
LearnAntiRossum1::LearnAntiRossum1(connection* con, float _Cp, float _Cd, float _SynNoiseSigma, float _maxWeight)
  : LearnRossum1(con, _Cp, _Cd, _SynNoiseSigma, _maxWeight)
{
}
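
// Editor's note: proceede() below mirrors LearnRossum1::proceede with the two
// branches swapped. Pre-before-post pairings are now depressed multiplicatively
// (w += w*(-Cd+xi)*pre_trace) and every arriving presynaptic spike is
// potentiated additively (w += (Cp + w*xi)*post_trace): an anti-Hebbian
// version of the same rule.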
// proceede as in LearnRossum1::proceede, but with LTP and LTD exchanged
int LearnAntiRossum1::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;
  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings < SourceLayer->N_firings))
  {
    cout << "programming error, wrong firing indices\n";
    cout << "t=" << t << " SourceLayer->firings[spike][0]=" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }
  // update presynaptic learning potential (no accumulation here)
  while (spike < SourceLayer->N_firings) SourceLearnPot[SourceLayer->firings[spike++][1]][t+Dmax] = 1.0;
  // multiplicative depression for all target neurons that fired this time step
  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings)
  {
    i = TargetLayer->firings[spike++][1];
    TargetLearnPot[i] = 1;
    for (j=0; j<N_pre[i]; j++) {
      *s_pre[i][j] += *s_pre[i][j] * (-Cd + gsl_ran_gaussian(gslr, SynNoiseSigma)) * SourceLearnPot[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
      if (*s_pre[i][j] < 0) *s_pre[i][j] = 0;
    }
    // this spike was after pre-synaptic spikes
  }
  for (i=0; i<ns; ++i) SourceLearnPot[i][t+Dmax+1] = SourceLearnPot[i][t+Dmax] * dec_pre;
  for (i=0; i<nt; ++i) TargetLearnPot[i] *= dec_post;
  // additive potentiation for every arriving presynaptic spike
  float* CurWeight;
  k = SourceLayer->N_firings;
  while (t - SourceLayer->firings[--k][0] < maximumDelay) // not Dmax!
  {
    int SourceI = SourceLayer->firings[k][1];
    for (j=0; j<delays_length[SourceI][t-SourceLayer->firings[k][0]]; j++)
    {
      CurWeight = &s[SourceI][delays[SourceI][t-SourceLayer->firings[k][0]][j]];
      i = post[SourceI][delays[SourceI][t-SourceLayer->firings[k][0]][j]];
      *CurWeight += (Cp + *CurWeight*gsl_ran_gaussian(gslr, SynNoiseSigma)) * TargetLearnPot[i];
      if (*CurWeight < 0) *CurWeight = 0;
    }
  }
  return 0;
}
///////////////////////////////////////
LearnHebbLP2::LearnHebbLP2(connection* con, float _MaxWeight, float TauDec, float _BaseLine, float _LearnSpeed, bool _Accumulate)
  : learning(con, _MaxWeight), BaseLine(_BaseLine), LearnSpeed(_LearnSpeed), LtpInc(1), Accumulate(_Accumulate)
{
  LtpDecFac = exp(-dt/TauDec);
  int i, j, k;
  NewArray2d(LTP, ns, 1001+Dmax); // presynaptic trace (source)
  cout << "LearnHebbLP2\n";
  cout << "LearnSpeed=" << LearnSpeed << " TauDec=" << TauDec << "\n";
  cout << "Accumulate=" << Accumulate << "\n";
  cout << "initialize LTP\n"; fflush(stdout);
  for (i=0; i<ns; i++) for (j=0; j<1001+Dmax; j++) LTP[i][j] = 0.0;
}

LearnHebbLP2::~LearnHebbLP2()
{
  DeleteArray2d(LTP, ns);
}
int LearnHebbLP2::WriteSimInfo(fstream &fw)
{
  fw << "<" << seTypeString << " id=\"" << IdNumber << "\" Type=\"" << seType << "\" Name=\"" << Name << "\"> \n";
  fw << "<LearnConnection id=\"" << con->IdNumber << "\"/> \n";
  fw << "<MaxWeight Value=\"" << maxWeight << "\"/> \n";
  fw << "<LtpDecFac Value=\"" << LtpDecFac << "\"/> \n";
  fw << "<BaseLine Value=\"" << BaseLine << "\"/> \n";
  fw << "<LearnSpeed Value=\"" << LearnSpeed << "\"/> \n";
  fw << "<Accumulate Value=\"" << Accumulate << "\"/> \n";
  fw << "<LtpInc Value=\"" << LtpInc << "\"/> \n";
  fw << "</" << seTypeString << "> \n";
  return 0;
}
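
// Editor's sketch of the rule in proceede() below: a presynaptic trace decays
// with TauDec and is incremented (Accumulate) or reset (!Accumulate) to LtpInc
// on each presynaptic spike; on a postsynaptic spike each incoming weight moves by
//   w += LearnSpeed * (LTP_pre - BaseLine)
// so pairings within the trace window potentiate, BaseLine provides a
// pairing-independent depression term, and weights stay in [minWeight, maxWeight].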
int LearnHebbLP2::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;
  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings < SourceLayer->N_firings))
  {
    cout << "programming error, wrong firing indices\n";
    cout << "t=" << t << " SourceLayer->firings[spike][0]=" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }
  // update presynaptic learning potential (for LTP)
  if (Accumulate)
    while (spike < SourceLayer->N_firings) LTP[SourceLayer->firings[spike++][1]][t+Dmax] += LtpInc;
  else
    while (spike < SourceLayer->N_firings) LTP[SourceLayer->firings[spike++][1]][t+Dmax] = LtpInc;
  // update weights of all target neurons that fired this time step
  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings)
  {
    i = TargetLayer->firings[spike++][1];
    // calculate LTP
    for (j=0; j<N_pre[i]; j++) {
      *s_pre[i][j] += LearnSpeed * (LTP[I_pre[i][j]][t+Dmax-D_pre[i][j]-1] - BaseLine);
      if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
      else if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
    }
  }
  for (i=0; i<ns; ++i) LTP[i][t+Dmax+1] = LTP[i][t+Dmax] * LtpDecFac;
  return 0;
}
int LearnHebbLP2::prepare(int step)
{
  int i, j, k;
  for (i=0; i<ns; i++)        // prepare for the next second
    for (j=0; j<Dmax+1; j++)
      LTP[i][j] = LTP[i][MacroTimeStep+j];
  return 0;
}
//////////////////////////////////////
LearnHebbLP2_norm::LearnHebbLP2_norm(connection* con, float _MaxWeight, float TauDec, float _BaseLine, float _LearnSpeed, float _WeightSum)
  : learning(con, _MaxWeight), BaseLine(_BaseLine), LearnSpeed(_LearnSpeed), LtpInc(1), WeightSum(_WeightSum)
{
  LtpDecFac = exp(-dt/TauDec);
  int i, j, k;
  NewArray2d(LTP, ns, 1001+Dmax); // presynaptic trace (source)
  cout << "LearnHebbLP2_norm\n";
  cout << "LearnSpeed=" << LearnSpeed << " WeightSum=" << WeightSum << "\n";
  cout << "initialize LTP\n"; fflush(stdout);
  for (i=0; i<ns; i++) for (j=0; j<1001+Dmax; j++) LTP[i][j] = 0.0;
  Normalize = (WeightSum != 0); // WeightSum == 0 disables normalization
  if (Normalize) NormalizeWeights();
}
LearnHebbLP2_norm::~LearnHebbLP2_norm()
{
  DeleteArray2d(LTP, ns);
}
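
// Editor's sketch: same baseline-subtracted Hebbian update as LearnHebbLP2, but
// after each postsynaptic spike the incoming weights of that neuron are rescaled
// multiplicatively so that their sum returns to WeightSum:
//   w_j *= WeightSum / sum_k(w_k)
// i.e. per-neuron constant-sum normalization, which makes the afferents of a
// neuron compete for a fixed total weight.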
int LearnHebbLP2_norm::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;
  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings < SourceLayer->N_firings))
  {
    cout << "programming error, wrong firing indices\n";
    cout << "t=" << t << " SourceLayer->firings[spike][0]=" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }
  // update presynaptic learning potential (for LTP)
  while (spike < SourceLayer->N_firings) LTP[SourceLayer->firings[spike++][1]][t+Dmax] += LtpInc;
  // update weights of all target neurons that fired this time step
  spike = TargetLayer->last_N_firings;
  float CurWeightSum; // for normalization
  while (spike < TargetLayer->N_firings)
  {
    i = TargetLayer->firings[spike++][1];
    // calculate LTP
    CurWeightSum = 0;
    for (j=0; j<N_pre[i]; j++) {
      *s_pre[i][j] += LearnSpeed * (LTP[I_pre[i][j]][t+Dmax-D_pre[i][j]-1] - BaseLine);
      CurWeightSum += *s_pre[i][j];
    }
    // normalize the summed input weights of this neuron to WeightSum
    if (CurWeightSum > 0) for (j=0; j<N_pre[i]; ++j) (*s_pre[i][j]) *= WeightSum/CurWeightSum;
    // this spike was after pre-synaptic spikes
  }
  for (i=0; i<ns; ++i) LTP[i][t+Dmax+1] = LTP[i][t+Dmax] * LtpDecFac;
  return 0;
}
int LearnHebbLP2_norm::prepare(int step)
{
  int i, j, k;
  for (i=0; i<ns; i++)        // prepare for the next second
    for (j=0; j<Dmax+1; j++)
      LTP[i][j] = LTP[i][MacroTimeStep+j];
  // if (Normalize) NormalizeWeights();
  return 0;
}
int LearnHebbLP2_norm::NormalizeWeights()
{
  int i, j;
  cout << "Normalize Weights\n";
  for (i=0; i<nt; ++i) {
    float CurWeightSum = 0;
    for (j=0; j<N_pre[i]; ++j) CurWeightSum += *s_pre[i][j];
    if (CurWeightSum > 0) for (j=0; j<N_pre[i]; ++j) (*s_pre[i][j]) *= WeightSum/CurWeightSum;
  }
  return 0;
}
//////////////////////////////////////
LearnPrePost::LearnPrePost(connection* con, float _maxWeight, float _TauLearnPre, float _TauLearnPost)
  : learning(con, _maxWeight)
{
  int i, j, k;
  NewArray2d(LPpre, ns, MacroTimeStep+1+Dmax); // presynaptic trace (source)
  LPpost = new float[nt];                      // postsynaptic trace (target)
  dec_pre = exp(-dt/_TauLearnPre);
  dec_post = exp(-dt/_TauLearnPost);
  cout << "initialize LPpre\n"; fflush(stdout);
  for (i=0; i<ns; i++) for (j=0; j<MacroTimeStep+1+Dmax; j++) LPpre[i][j] = 0.0;
  cout << "initialize LPpost\n"; fflush(stdout);
  for (i=0; i<nt; i++) LPpost[i] = 0.0;
  cout << "initialized LPpost\n"; fflush(stdout);
}

LearnPrePost::~LearnPrePost()
{
  cout << "LearnPrePost Destructor\n"; fflush(stdout);
  DeleteArray2d(LPpre, ns);
  delete[] LPpost;
}
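
// Editor's note: LearnPrePost only maintains the machinery shared by the rules
// derived from it: a delay-indexed presynaptic trace LPpre (decay factor
// dec_pre), a postsynaptic trace LPpost (decay factor dec_post), and the
// ring-buffer copy in prepare(); the actual weight updates live in the
// derived classes.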
int LearnPrePost::proceede(int TotalTime)
{
  return 0; // nothing to do in the base class
}

int LearnPrePost::prepare(int step)
{
  int i, j, k;
  for (i=0; i<ns; i++)        // prepare for the next second
    for (j=0; j<Dmax+1; j++)
      LPpre[i][j] = LPpre[i][MacroTimeStep+j];
  return 0;
}
////////////////////////////////////////////
LearnHebbLP3::LearnHebbLP3(connection* con, float _maxWeight, float _TauLearnPre, float _TauLearnPost, float _BaseLine, float _LearnRate, bool _Accumulate)
  : LearnPrePost(con, _maxWeight, _TauLearnPre, _TauLearnPost), LearnRate(_LearnRate), BaseLine(_LearnRate*_BaseLine), Accumulate(_Accumulate)
{
  // BaseLine is already scaled by LearnRate in the initializer list;
  // see the comment in LearnHebbLP3::proceede(int TotalTime)
}
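
// Editor's restatement of the optimization used in proceede() below: the
// nominal rule is
//   dw = LearnRate * (LPpre_hat - Lbaseline) * LPpost
// where LPpre_hat jumps by 1 per presynaptic spike. Implementing LPpre with
// jumps of LearnRate instead makes LearnRate*LPpre_hat == LPpre, so
//   dw = (LPpre - LearnRate*Lbaseline) * LPpost
// which is why the constructor stores BaseLine pre-multiplied by LearnRate and
// the inner loop saves one multiplication per synapse.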
int LearnHebbLP3::proceede(int TotalTime)
{
  // learning rule: dw/dt = LearnRate*(LPpre - Lbaseline)*LPpost
  // In this implementation LearnRate is used as the LPpre increment.
  // This is equivalent to the formula above if Lbaseline is adjusted
  // correspondingly: Lbaseline_corr = Lbaseline*LearnRate.
  // The advantage is one multiplication (by LearnRate) less in the loop.
  // If constant-sum normalization is applied, Lbaseline shouldn't have any
  // effect anyway.
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;
  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings < SourceLayer->N_firings))
  {
    cout << "programming error, wrong firing indices\n";
    cout << "t=" << t << " SourceLayer->firings[spike][0]=" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }
  // update presynaptic learning potential (for LTP)
  while (spike < SourceLayer->N_firings) LPpre[SourceLayer->firings[spike++][1]][t+Dmax] += LearnRate;
  // update weights of all target neurons that fired this time step
  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings)
  {
    i = TargetLayer->firings[spike++][1];
    LPpost[i] += 1;
    // calculate LTP
    for (j=0; j<N_pre[i]; j++) {
      *s_pre[i][j] += LPpost[i] * (LPpre[I_pre[i][j]][t+Dmax-D_pre[i][j]-1] - BaseLine);
      if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
      if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
    }
    // this spike was after pre-synaptic spikes
  }
  // decrease synaptic potentials
  for (i=0; i<ns; ++i) LPpre[i][t+Dmax+1] = LPpre[i][t+Dmax] * dec_pre;
  for (i=0; i<nt; ++i) LPpost[i] *= dec_post;
  return 0;
}
/////////////////////////////////////////////////////////
LearnFBInh::LearnFBInh(connection* con, float _maxWeight, float TauLearnPre, float TauLearnPost, float _Cpre, float _Cdep)
  : LearnPrePost(con, _maxWeight, TauLearnPre, TauLearnPost), Cpre(_Cpre), Cdep(_Cdep)
{
  cout << "LearnFBInh: MaxWeight=" << maxWeight << "\n";
}
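
// Editor's sketch of the rule below (a rule for feedback inhibitory
// connections, as the class name suggests):
//   LTP: on a postsynaptic spike, w += LPpre, i.e. pre-before-post pairing
//        through an exponential window seeded with Cpre
//   LTD: on every arriving presynaptic spike, w -= w*Cdep, i.e. a
//        weight-proportional decay independent of postsynaptic activity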
// LTP: according to pre->post pairing (exponential learning potential)
// LTD: for every presynaptic spike, proportional to current weight
int LearnFBInh::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;
  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings < SourceLayer->N_firings))
  {
    cout << "programming error, wrong firing indices\n";
    cout << "t=" << t << " SourceLayer->firings[spike][0]=" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }
  // update presynaptic learning potential (for LTP)
  while (spike < SourceLayer->N_firings) LPpre[SourceLayer->firings[spike++][1]][t+Dmax] = Cpre;
  // potentiate weights of all target neurons that fired this time step
  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings)
  {
    i = TargetLayer->firings[spike++][1];
    LPpost[i] = 1;
    // calculate LTP
    for (j=0; j<N_pre[i]; j++) {
      *s_pre[i][j] += LPpre[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
      if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
    }
    // this spike was after pre-synaptic spikes
  }
  for (i=0; i<ns; ++i) LPpre[i][t+Dmax+1] = LPpre[i][t+Dmax] * dec_pre;
  for (i=0; i<nt; ++i) LPpost[i] *= dec_post;
  // calculate LTD
  float* CurWeight;
  k = SourceLayer->N_firings;
  while (t - SourceLayer->firings[--k][0] < maximumDelay) // not Dmax!
  {
    int SourceI = SourceLayer->firings[k][1];
    for (j=0; j<delays_length[SourceI][t-SourceLayer->firings[k][0]]; j++)
    {
      CurWeight = &s[SourceI][delays[SourceI][t-SourceLayer->firings[k][0]][j]];
      i = post[SourceI][delays[SourceI][t-SourceLayer->firings[k][0]][j]];
      *CurWeight -= *CurWeight*Cdep; // *(1-LPpost[i]);
      if (*CurWeight < 0) *CurWeight = 0;
    }
  }
  return 0;
}
//////////////////////////////////////////////
LearnFWInh::LearnFWInh(connection* con, float _maxWeight, float TauLearnPre, float TauLearnPost, float _Cpre, float _Cdep)
  : LearnPrePost(con, _maxWeight, TauLearnPre, TauLearnPost), Cpre(_Cpre), Cdep(_Cdep)
{
  cout << "LearnFWInh: MaxWeight=" << maxWeight << "\n";
}
// LTD: according to pre->post pairing (exponential learning potential), proportional to current weight
// LTP: for every presynaptic spike, proportional to current weight
int LearnFWInh::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;
  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings < SourceLayer->N_firings))
  {
    cout << "programming error, wrong firing indices\n";
    cout << "t=" << t << " SourceLayer->firings[spike][0]=" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }
  // update presynaptic learning potential (for LTD)
  while (spike < SourceLayer->N_firings) LPpre[SourceLayer->firings[spike++][1]][t+Dmax] = Cpre;
  // weight-proportional depression for all target neurons that fired this time step
  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings)
  {
    i = TargetLayer->firings[spike++][1];
    LPpost[i] = 1;
    // calculate LTD
    for (j=0; j<N_pre[i]; j++) {
      *s_pre[i][j] -= *s_pre[i][j] * Cdep * LPpre[I_pre[i][j]][t+Dmax-D_pre[i][j]-1];
      if (*s_pre[i][j] < 0) *s_pre[i][j] = 0;
    }
    // this spike was after pre-synaptic spikes
  }
  for (i=0; i<ns; ++i) LPpre[i][t+Dmax+1] = LPpre[i][t+Dmax] * dec_pre;
  for (i=0; i<nt; ++i) LPpost[i] *= dec_post;
  // weight-proportional potentiation for every arriving presynaptic spike
  float* CurWeight;
  k = SourceLayer->N_firings;
  while (t - SourceLayer->firings[--k][0] < maximumDelay) // not Dmax!
  {
    int SourceI = SourceLayer->firings[k][1];
    for (j=0; j<delays_length[SourceI][t-SourceLayer->firings[k][0]]; j++)
    {
      CurWeight = &s[SourceI][delays[SourceI][t-SourceLayer->firings[k][0]][j]];
      i = post[SourceI][delays[SourceI][t-SourceLayer->firings[k][0]][j]];
      *CurWeight += *CurWeight*Cpre;
      if (*CurWeight < 0) *CurWeight = 0;
    }
  }
  return 0;
}
/////////////////////////////////////////////////////////
LearnAntiHebb::LearnAntiHebb(connection* con, float _maxWeight, float TauLearnPre, float TauLearnPost, float _Cpre, float _Cdep)
  : LearnPrePost(con, _maxWeight, TauLearnPre, TauLearnPost), Cpre(_Cpre), Cdep(_Cdep)
{
  cout << "LearnAntiHebb: MaxWeight=" << maxWeight << "\n";
}
// Dummy: this object is not in use yet (use LearnHebbLP2 instead)
int LearnAntiHebb::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k;
  // increase learning potentials for each spike of last time step
  int spike = SourceLayer->last_N_firings;
  if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings < SourceLayer->N_firings))
  {
    cout << "programming error, wrong firing indices\n";
    cout << "t=" << t << " SourceLayer->firings[spike][0]=" << SourceLayer->firings[spike][0] << "\n";
    cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
    cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
  }
  // update presynaptic learning potential
  while (spike < SourceLayer->N_firings) LPpre[SourceLayer->firings[spike++][1]][t+Dmax] = 1;
  // update weights of all target neurons that fired this time step
  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings)
  {
    i = TargetLayer->firings[spike++][1];
    LPpost[i] = 1;
    // anti-Hebbian potentiation: strongest for unpaired postsynaptic spikes
    for (j=0; j<N_pre[i]; j++) {
      *s_pre[i][j] += Cpre * (1 - LPpre[I_pre[i][j]][t+Dmax-D_pre[i][j]-1]);
      if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
    }
    // this spike was after pre-synaptic spikes
  }
  for (i=0; i<ns; ++i) LPpre[i][t+Dmax+1] = LPpre[i][t+Dmax] * dec_pre;
  for (i=0; i<nt; ++i) LPpost[i] *= dec_post;
  // depression for every arriving presynaptic spike, strongest for silent targets
  float* CurWeight;
  k = SourceLayer->N_firings;
  while (t - SourceLayer->firings[--k][0] < maximumDelay) // not Dmax!
  {
    int SourceI = SourceLayer->firings[k][1];
    for (j=0; j<delays_length[SourceI][t-SourceLayer->firings[k][0]]; j++)
    {
      CurWeight = &s[SourceI][delays[SourceI][t-SourceLayer->firings[k][0]][j]];
      i = post[SourceI][delays[SourceI][t-SourceLayer->firings[k][0]][j]];
      *CurWeight -= Cdep * (1 - LPpost[i]);
      if (*CurWeight < 0) *CurWeight = 0;
    }
  }
  return 0;
}
////////////////////////// LearnBiPoo
LearnBiPoo::LearnBiPoo(connection* con, float _maxWeight, float _LearnRate, float _TauPeak1, float _TauPeak2, float _Amp1, float _Amp2)
  : learning(con, _maxWeight), LearnRate(_LearnRate),
    TauPeak1(_TauPeak1), TauPeak2(_TauPeak2), Amp1(_Amp1), Amp2(_Amp2),
    LastSpikesPre(0), LastSpikesPost(0),
    PreLearnWindowLut(0),
    PostLearnWindowLut(0)
{
  // initialize lookup tables for the learning window
  PreLearnWindowLut = AlphaFktLut(PreLutN, TauPeak1, LearnRate*Amp1, 0, dt);
  PostLearnWindowLut = AlphaFktLut(PostLutN, TauPeak2, LearnRate*Amp2, 0, dt);
  // initialize arrays for last spike times
  NewArray2d(LastSpikesPre, ns, M);
  for (int i=0; i<ns; ++i) for (int j=0; j<M; ++j) LastSpikesPre[i][j] = int(-PreLutN);
  LastSpikesPost = new int[nt];
  for (int i=0; i<nt; ++i) LastSpikesPost[i] = int(-PostLutN);
}
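
// Editor's sketch: nearest-spike STDP with an alpha-function learning window
// (the class name suggests the Bi & Poo (1998) pairing data as the model).
// Only the most recent pre/post spike times are stored, and a pairing with
// time difference TDiff is looked up in a precomputed table rather than
// evaluating something like
//   w(TDiff) = A * (TDiff/TauPeak) * exp(1 - TDiff/TauPeak)
// at run time (assuming that is what AlphaFktLut tabulates; its exact
// normalization lives elsewhere in the repository).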
int LearnBiPoo::prepare(int step)
{
  return 0; // nothing to do: spike times are stored as absolute TotalTime
}
int LearnBiPoo::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k, TDiff;
  int spike;
  int SourceI;
  // learn all postsynaptic neurons (target layer) that fired last time step
  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings)
  {
    i = TargetLayer->firings[spike++][1];
    LastSpikesPost[i] = TotalTime;
    // calculate LTP
    for (j=0; j<N_pre[i]; j++) {
      TDiff = TotalTime - LastSpikesPre[I_pre[i][j]][m_pre[i][j]];
      if ((TDiff > 0) && (TDiff < PostLutN)) {
        *s_pre[i][j] += PostLearnWindowLut[TDiff];
        if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
        if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
      }
    }
  }
  // learn all presynaptic neurons whose spikes arrive this time step
  float* CurWeight;
  int CurDelay;
  int SynNumber;
  k = SourceLayer->N_firings;
  while (t - SourceLayer->firings[--k][0] < maximumDelay) // not Dmax!
  {
    SourceI = SourceLayer->firings[k][1];
    CurDelay = t - SourceLayer->firings[k][0];
    for (j=0; j<delays_length[SourceI][CurDelay]; j++)
    {
      SynNumber = delays[SourceI][CurDelay][j];
      LastSpikesPre[SourceI][SynNumber] = TotalTime;
      CurWeight = &s[SourceI][SynNumber];
      i = post[SourceI][SynNumber];
      TDiff = TotalTime - LastSpikesPost[i];
      if ((TDiff > 0) && (TDiff < PreLutN)) {
        *CurWeight += PreLearnWindowLut[TDiff];
        if (*CurWeight > maxWeight) *CurWeight = maxWeight;
        if (*CurWeight < minWeight) *CurWeight = minWeight;
      }
    }
  }
  return 0;
}
LearnBiPoo::~LearnBiPoo()
{
  DeleteArray2d(LastSpikesPre, ns);
  delete [] LastSpikesPost;
  delete [] PreLearnWindowLut;
  delete [] PostLearnWindowLut;
}
//////////////////// LearnFroemkeDan
LearnFroemkeDan::LearnFroemkeDan(
  connection* con, float _maxWeight, float _LearnRate,
  float _TauPreEff, float _TauPostEff,
  float _TauPeak1, float _TauPeak2, float _Amp1, float _Amp2)
  : learning(con, _maxWeight), LearnRate(_LearnRate),
    TauPreEff(_TauPreEff), TauPostEff(_TauPostEff),
    TauPeak1(_TauPeak1), TauPeak2(_TauPeak2), Amp1(_Amp1), Amp2(_Amp2),
    LastSpikesPre(0), LastSpikesPost(0),
    SecondLastSpikesPre(0), SecondLastSpikesPost(0),
    PreLearnWindowLut(0),
    PostLearnWindowLut(0),
    PreEfficacyLut(0), PostEfficacyLut(0)
{
  // initialize lookup tables for the learning window
  // PreLearnWindowLut = AlphaFktLut(PreLutN, TauPeak1, LearnRate*Amp1, 0, dt);
  // PostLearnWindowLut = AlphaFktLut(PostLutN, TauPeak2, LearnRate*Amp2, 0, dt);
  PreLearnWindowLut = ExpDecayLut(PreLutN, TauPeak1, LearnRate*Amp1, 0, dt);
  PostLearnWindowLut = ExpDecayLut(PostLutN, TauPeak2, LearnRate*Amp2, 0, dt);
  // initialize synaptic efficacy lookup tables
  float PreEffLutBound = 5*TauPreEff;
  float PostEffLutBound = 5*TauPostEff;
  PreEffLutN = int(PreEffLutBound/dt);
  PostEffLutN = int(PostEffLutBound/dt);
  PreEfficacyLut = new float[PreEffLutN];
  PostEfficacyLut = new float[PostEffLutN];
  float t;
  for (int n=0; n<PreEffLutN; ++n) {
    t = n*dt;
    PreEfficacyLut[n] = 1 - exp(-t/TauPreEff);
  }
  for (int n=0; n<PostEffLutN; ++n) {
    t = n*dt;
    PostEfficacyLut[n] = 1 - exp(-t/TauPostEff);
  }
  // initialize arrays for last spike times
  NewArray2d(LastSpikesPre, ns, M);
  for (int i=0; i<ns; ++i) for (int j=0; j<M; ++j) LastSpikesPre[i][j] = int(-PreLutN);
  LastSpikesPost = new int[nt];
  for (int i=0; i<nt; ++i) LastSpikesPost[i] = int(-PostLutN);
  // initialize arrays for second-to-last spike times
  NewArray2d(SecondLastSpikesPre, ns, M);
  for (int i=0; i<ns; ++i) for (int j=0; j<M; ++j) SecondLastSpikesPre[i][j] = int(-PreLutN-1);
  SecondLastSpikesPost = new int[nt];
  for (int i=0; i<nt; ++i) SecondLastSpikesPost[i] = int(-PostLutN-1);
}
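
// Editor's sketch: STDP with spike efficacy as in Froemke & Dan (2002), to
// which the class name points. Each spike's contribution is suppressed right
// after a preceding spike of the same neuron and recovers exponentially with
// the inter-spike interval:
//   eff = 1 - exp(-ISI/tau_eff)
// and a pre/post pairing updates the weight multiplicatively:
//   w *= 1 + eff_pre * eff_post * window(TDiff)
// with exponential-decay windows tabulated in Pre/PostLearnWindowLut.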
int LearnFroemkeDan::prepare(int step)
{
  return 0; // nothing to do: spike times are stored as absolute TotalTime
}
int LearnFroemkeDan::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k, TDiff, TDiffPre, TDiffPost;
  int spike;
  float EffPre, EffPost;
  int SourceI;
  int ConIndex;
  // learn all postsynaptic neurons (target layer) that fired last time step
  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings)
  {
    i = TargetLayer->firings[spike++][1];
    SecondLastSpikesPost[i] = LastSpikesPost[i];
    LastSpikesPost[i] = TotalTime;
    TDiffPost = TotalTime - SecondLastSpikesPost[i];
    if (TDiffPost < PostEffLutN) {
      EffPost = PostEfficacyLut[TDiffPost];
    } else {
      EffPost = 1;
    }
    // calculate LTP
    for (j=0; j<N_pre[i]; j++) {
      SourceI = I_pre[i][j];
      ConIndex = m_pre[i][j];
      TDiff = TotalTime - LastSpikesPre[SourceI][ConIndex];
      TDiffPre = LastSpikesPre[SourceI][ConIndex] - SecondLastSpikesPre[SourceI][ConIndex];
      if (TDiffPre < PreEffLutN) {
        EffPre = PreEfficacyLut[TDiffPre];
      } else {
        EffPre = 1;
      }
      if ((TDiff > 0) && (TDiff < PostLutN)) {
        *s_pre[i][j] *= (1 + EffPost*EffPre*PostLearnWindowLut[TDiff]);
      }
      if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
      if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
    }
  }
  // learn all presynaptic neurons whose spikes arrive this time step
  float* CurWeight;
  int CurDelay;
  int SynNumber;
  k = SourceLayer->N_firings;
  while (t - SourceLayer->firings[--k][0] < maximumDelay) // not Dmax!
  {
    SourceI = SourceLayer->firings[k][1];
    CurDelay = t - SourceLayer->firings[k][0];
    for (j=0; j<delays_length[SourceI][CurDelay]; j++)
    {
      SynNumber = delays[SourceI][CurDelay][j];
      // calculate EffPre only once: within this loop it is always the same
      // (same presynaptic neuron, same delay)
      if (j == 0) {
        TDiffPre = TotalTime - LastSpikesPre[SourceI][SynNumber];
        if (TDiffPre < PreEffLutN) {
          EffPre = PreEfficacyLut[TDiffPre];
        } else {
          EffPre = 1;
        }
      }
      // save last spike timings
      SecondLastSpikesPre[SourceI][SynNumber] = LastSpikesPre[SourceI][SynNumber];
      LastSpikesPre[SourceI][SynNumber] = TotalTime;
      CurWeight = &s[SourceI][SynNumber];
      i = post[SourceI][SynNumber];
      TDiff = TotalTime - LastSpikesPost[i];
      TDiffPost = LastSpikesPost[i] - SecondLastSpikesPost[i];
      if (TDiffPost < PostEffLutN) {
        EffPost = PostEfficacyLut[TDiffPost];
      } else {
        EffPost = 1;
      }
      if ((TDiff > 0) && (TDiff < PreLutN)) {
        *CurWeight *= (1 + EffPre*EffPost*PreLearnWindowLut[TDiff]);
        if (*CurWeight > maxWeight) *CurWeight = maxWeight;
        if (*CurWeight < minWeight) *CurWeight = minWeight;
      }
    }
  }
  return 0;
}
LearnFroemkeDan::~LearnFroemkeDan()
{
  DeleteArray2d(LastSpikesPre, ns);
  DeleteArray2d(SecondLastSpikesPre, ns);
  delete [] LastSpikesPost;
  delete [] SecondLastSpikesPost;
  delete [] PreLearnWindowLut;
  delete [] PostLearnWindowLut;
  delete [] PreEfficacyLut;
  delete [] PostEfficacyLut;
}
////////////////////////////////////
LearnSjoestroem::LearnSjoestroem(
  connection* con, float _maxWeight, float _LearnRate,
  float _TauLTDep, float _TauLTPot, float _Amp1, float _Amp2)
  : learning(con, _maxWeight), LearnRate(_LearnRate),
    TauLTDep(_TauLTDep), TauLTPot(_TauLTPot), Amp1(_Amp1), Amp2(_Amp2),
    LastSpikesPre(0), LastSpikesPost(0),
    LTPotLearnWindowLut(0),
    LTDepLearnWindowLut(0)
{
  // initialize lookup tables for the learning window
  LTPotLearnWindowLut = ExpDecayLut(LTPotLutN, TauLTPot, LearnRate*Amp2, 0, dt, 4);
  LTDepLearnWindowLut = ExpDecayLut(LTDepLutN, TauLTDep, LearnRate*Amp1, 1, dt, 4);
  // initialize arrays for last spike times
  NewArray2d(LastSpikesPre, ns, M);
  for (int i=0; i<ns; ++i) for (int j=0; j<M; ++j) LastSpikesPre[i][j] = int(-LTPotLutN);
  NewArray2d(LastSpikesPost, ns, M);
  for (int i=0; i<ns; ++i) for (int j=0; j<M; ++j) LastSpikesPost[i][j] = int(-LTDepLutN);
}
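
// Editor's sketch: a pairing rule in the spirit of Sjoestroem et al., as the
// class name suggests: additive LTP for pre-before-post pairings and
// multiplicative LTD for post-before-pre pairings, with the twist that a
// postsynaptic spike that triggers LTP is not stored as a "last post spike",
// so potentiation wins over depression when both windows would overlap
// (see the comments in proceede() below). Note that LastSpikesPost is kept
// per synapse here, not per neuron.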
int LearnSjoestroem::prepare(int step)
{
  return 0; // nothing to do: spike times are stored as absolute TotalTime
}
int LearnSjoestroem::proceede(int TotalTime)
{
  int t = int(TotalTime % MacroTimeStep);
  int i, j, k, TDiff;
  int spike;
  int SourceI;
  int ConIndex;
  // learn all postsynaptic neurons (target layer) that fired last time step
  spike = TargetLayer->last_N_firings;
  while (spike < TargetLayer->N_firings)
  {
    i = TargetLayer->firings[spike++][1];
    // calculate LTP
    for (j=0; j<N_pre[i]; j++) {
      SourceI = I_pre[i][j];
      ConIndex = m_pre[i][j];
      TDiff = TotalTime - LastSpikesPre[SourceI][ConIndex];
      if ((TDiff > 0) && (TDiff < LTPotLutN)) { // LTP
        *s_pre[i][j] += LTPotLearnWindowLut[TDiff];
        // not storing the postsynaptic spike time because LTP "wins" over LTD
        if (*s_pre[i][j] > maxWeight) *s_pre[i][j] = maxWeight;
        if (*s_pre[i][j] < minWeight) *s_pre[i][j] = minWeight;
      } else { // no LTP --> store postsynaptic spike time for potential LTD
        LastSpikesPost[SourceI][ConIndex] = TotalTime;
      }
    }
  }
  // learn all presynaptic neurons whose spikes arrive this time step
  float* CurWeight;
  int CurDelay;
  int SynNumber;
  k = SourceLayer->N_firings;
  while (t - SourceLayer->firings[--k][0] < maximumDelay) // not Dmax!
  {
    SourceI = SourceLayer->firings[k][1];
    CurDelay = t - SourceLayer->firings[k][0];
    for (j=0; j<delays_length[SourceI][CurDelay]; j++)
    {
      SynNumber = delays[SourceI][CurDelay][j];
      // save last spike timing
      LastSpikesPre[SourceI][SynNumber] = TotalTime;
      CurWeight = &s[SourceI][SynNumber];
      i = post[SourceI][SynNumber];
      TDiff = TotalTime - LastSpikesPost[SourceI][SynNumber];
      if ((TDiff > 0) && (TDiff < LTDepLutN)) { // LTD
        *CurWeight *= LTDepLearnWindowLut[TDiff];
        if (*CurWeight > maxWeight) *CurWeight = maxWeight;
        if (*CurWeight < minWeight) *CurWeight = minWeight;
      }
    }
  }
  return 0;
}
LearnSjoestroem::~LearnSjoestroem()
{
  DeleteArray2d(LastSpikesPre, ns);
  DeleteArray2d(LastSpikesPost, ns);
  delete [] LTPotLearnWindowLut;
  delete [] LTDepLearnWindowLut;
}