vlearn.cpp

/*Copyright (C) 2005, 2006, 2007, 2008 Frank Michler, Philipps-University Marburg, Germany

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "sys.hpp"   // for libcwd
#include "debug.hpp" // for libcwd
#include "vlearn.hpp"
veclearning::veclearning(VecConnection* c, float _maxWeight): SimElement(seLearning), con(c), maxWeight(_maxWeight), minWeight(0)
{
    Name = "VecLearning";
    cout << "Initialize VecLearning Object, maxWeight= " << maxWeight << "\n";
    VecConnectionInfo ConInf = con->GetConnectionInfo();
    Dmax = ConInf.Dmax;
    maximumDelay = ConInf.maximumDelay;
    cout << "Dmax=" << Dmax << "\n";
    TargetLayer = ConInf.TargetLayer;
    SourceLayer = ConInf.SourceLayer;
    nt = TargetLayer->N;
    ns = SourceLayer->N;
    dt = TargetLayer->GetDt();
    MacroTimeStep = TargetLayer->GetMacroTimeStep();
    cout << "Initialize VecLearning Object: N_Target =" << nt << "\n";

    // cache pointers to the connection's synapse data (weights, indices, delays)
    PSynWeights = ConInf.PSynWeights;
    PSynTargetNr = ConInf.PSynTargetNr;
    PSynSourceNr = ConInf.PSynSourceNr;
    PSynDelays = ConInf.PSynDelays;
    PPreSynNr = ConInf.PPreSynNr;
    Pdelays = ConInf.Pdelays;
}
veclearning::~veclearning()
{
    cout << "veclearning Destructor\n"; fflush(stdout);
}
///////////////////////////////////////
VecLearnHebbLP2::VecLearnHebbLP2(VecConnection* con, float _MaxWeight, float TauDec, float _BaseLine, float _LearnSpeed, bool _Accumulate): veclearning(con, _MaxWeight), BaseLine(_BaseLine), LearnSpeed(_LearnSpeed), LtpInc(1), Accumulate(_Accumulate)
{
    LtpDecFac = exp(-dt/TauDec); // per-step decay factor of the presynaptic trace
    int i, j;
    NewArray2d(LTP, ns, 1001+Dmax); // presynaptic trace (source)
    for (i=0; i<ns; i++) for (j=0; j<1001+Dmax; j++) LTP[i][j] = 0.0;
    cout << "VecLearnHebbLP2\n";
    cout << "LearnSpeed = " << LearnSpeed << " TauDec=" << TauDec << "\n";
    cout << "Accumulate=" << Accumulate << "\n";
    cout << "initialize LTP\n";
    fflush(stdout);
}
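// A minimal stand-alone sketch (hypothetical helper, not part of the original
// class) of what the precomputed factor LtpDecFac encodes: one time step of the
// exponential trace decay d(LTP)/dt = -LTP/TauDec, i.e.
// LTP(t+dt) = LTP(t) * exp(-dt/TauDec). Precomputing the factor once in the
// constructor avoids calling exp() in every simulation step.
static inline float DecayTraceSketch(float trace, float decFac /* = exp(-dt/TauDec) */)
{
    return trace * decFac;
}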
VecLearnHebbLP2::~VecLearnHebbLP2()
{
    DeleteArray2d(LTP, ns);
}
int VecLearnHebbLP2::WriteSimInfo(fstream &fw)
{
    fw << "<" << seTypeString << " id=\"" << IdNumber << "\" Type=\"" << seType << "\" Name=\"" << Name << "\"> \n";
    fw << "<LearnConnection id=\"" << con->IdNumber << "\"/> \n";
    fw << "<MaxWeight Value=\"" << maxWeight << "\"/> \n";
    fw << "<LtpDecFac Value=\"" << LtpDecFac << "\"/> \n";
    fw << "<BaseLine Value=\"" << BaseLine << "\"/> \n";
    fw << "<LearnSpeed Value=\"" << LearnSpeed << "\"/> \n";
    fw << "<Accumulate Value=\"" << Accumulate << "\"/> \n";
    fw << "<LtpInc Value=\"" << LtpInc << "\"/> \n";
    fw << "</" << seTypeString << "> \n";
    return 0;
}
int VecLearnHebbLP2::proceede(int TotalTime)
{
    // cout <<"L";fflush(stdout); //remove
    int t = int(TotalTime % MacroTimeStep);
    int i, j;

    // increase learning potentials for each spike of the last time step
    int spike = SourceLayer->last_N_firings;
    if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings < SourceLayer->N_firings))
    {
        cerr << "this should never happen, I just don't know why ;-)\n";
        cerr << "programming error, wrong firing indices \n";
        cerr << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
        cerr << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
        cerr << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
    }

    // update presynaptic learning potential (for LTP):
    // either accumulate increments or saturate the trace at LtpInc
    if (Accumulate) while (spike < SourceLayer->N_firings) LTP[SourceLayer->firings[spike++][1]][t+Dmax] += LtpInc;
    else while (spike < SourceLayer->N_firings) LTP[SourceLayer->firings[spike++][1]][t+Dmax] = LtpInc;
    // END update

    spike = TargetLayer->last_N_firings;
    while (spike < TargetLayer->N_firings)
    {
        i = TargetLayer->firings[spike++][1];
        // calculate LTP: for each afferent synapse of neuron i, move the weight
        // by the presynaptic trace at spike arrival time minus the baseline
        for (j=0; j<(*PPreSynNr)[i].size(); j++) {
            T_NSynapses SynNr = (*PPreSynNr)[i][j];
            (*PSynWeights)[SynNr] += LearnSpeed*(LTP[(*PSynSourceNr)[SynNr]][t+Dmax-(*PSynDelays)[SynNr]-1] - BaseLine);
            if ((*PSynWeights)[SynNr] > maxWeight) {
                (*PSynWeights)[SynNr] = maxWeight;
            } else {
                if ((*PSynWeights)[SynNr] < minWeight) {
                    (*PSynWeights)[SynNr] = minWeight;
                }
            }
        }
    }

    // exponential decay of the presynaptic traces
    for (i=0; i<ns; ++i) LTP[i][t+Dmax+1] = LTP[i][t+Dmax] * LtpDecFac;
    return 0;
}
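// Stand-alone sketch (hypothetical helper, same clipping bounds as above) of the
// per-synapse update applied for each postsynaptic spike in proceede(): the
// weight moves by LearnSpeed * (presynaptic trace at spike arrival - BaseLine),
// so traces above BaseLine potentiate and traces below it depress, and the
// result is clipped to [minWeight, maxWeight].
static inline float HebbLP2UpdateSketch(float w, float preTrace, float learnSpeed,
                                        float baseLine, float minW, float maxW)
{
    w += learnSpeed * (preTrace - baseLine);
    if (w > maxW) w = maxW;
    if (w < minW) w = minW;
    return w;
}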
int VecLearnHebbLP2::prepare(int step)
{
    int i, j;
    // prepare for the next macro time step: copy the last Dmax+1 columns of the
    // trace buffer to the front so that delayed reads stay valid
    for (i=0; i<ns; i++)
        for (j=0; j<Dmax+1; j++)
            LTP[i][j] = LTP[i][MacroTimeStep+j];
    return 0;
}
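// Sketch (hypothetical, reduced to a single trace row) of the carry-over that
// prepare() performs: the trace buffer holds one macro time step plus Dmax+1
// extra columns, and the tail of the old macro step becomes the head of the new
// one, so delayed reads at index t+Dmax-delay-1 stay valid across the boundary.
static inline void CarryOverTraceSketch(float* traceRow, int macroTimeStep, int Dmax)
{
    for (int j = 0; j <= Dmax; j++)
        traceRow[j] = traceRow[macroTimeStep + j];
}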
//////////////////////////////////////
///////////////////////////////////
VecLearnPrePost::VecLearnPrePost(VecConnection* con, float _maxWeight, float _TauLearnPre, float _TauLearnPost): veclearning(con, _maxWeight)
{
    int i, j;
    NewArray2d(LPpre, ns, MacroTimeStep+1+Dmax); // presynaptic trace (source)
    LPpost = new float[nt]; // postsynaptic trace (target)
    dec_pre = exp(-dt/_TauLearnPre);
    dec_post = exp(-dt/_TauLearnPost);
    cout << "initialize LPpre\n";
    fflush(stdout);
    for (i=0; i<ns; i++) for (j=0; j<MacroTimeStep+1+Dmax; j++) LPpre[i][j] = 0.0;
    cout << "initialize LPpost\n"; fflush(stdout);
    for (i=0; i<nt; i++) LPpost[i] = 0.0;
    cout << "initialized LPpost\n"; fflush(stdout);
}
VecLearnPrePost::~VecLearnPrePost()
{
    cout << "VecLearnPrePost Destructor\n"; fflush(stdout);
    DeleteArray2d(LPpre, ns);
    delete [] LPpost;
}
int VecLearnPrePost::prepare(int step)
{
    int i, j;
    // prepare for the next macro time step: carry the tail of the presynaptic
    // trace buffer over to the front (same scheme as in VecLearnHebbLP2)
    for (i=0; i<ns; i++)
        for (j=0; j<Dmax+1; j++)
            LPpre[i][j] = LPpre[i][MacroTimeStep+j];
    return 0;
}
///////////////////////
VecLearnHebbLP3::VecLearnHebbLP3(VecConnection* con, float _maxWeight, float _TauLearnPre, float _TauLearnPost, float _BaseLine, float _LearnRate, bool _Accumulate): VecLearnPrePost(con, _maxWeight, _TauLearnPre, _TauLearnPost), LearnRate(_LearnRate), BaseLine(_LearnRate*_BaseLine), Accumulate(_Accumulate)
{
    cout << "Initialize VecLearnHebbLP3\n";
    // BaseLine is already pre-scaled by LearnRate in the initializer list,
    // see the comment in VecLearnHebbLP3::proceede(int TotalTime)
}
int VecLearnHebbLP3::proceede(int TotalTime)
{
    // learning rule: dw/dt = LearnRate*(LPpre-Lbaseline)*LPpost
    // In this implementation LearnRate is used as the increment of LPpre.
    // This is equivalent to the formula above if Lbaseline is adjusted correspondingly:
    // Lbaseline_corr = Lbaseline*LearnRate
    // The advantage is that there is one multiplication (by LearnRate) less in the inner loop.
    // If constant-sum normalization is done, Lbaseline shouldn't have any effect anyway.
    int t = int(TotalTime % MacroTimeStep);
    int i, j;

    // increase learning potentials for each spike of the last time step
    int spike = SourceLayer->last_N_firings;
    if ((SourceLayer->firings[spike][0] != t) && (SourceLayer->last_N_firings < SourceLayer->N_firings))
    {
        cout << "programming error, wrong firing indices \n";
        cout << "t=" << t << " SourceLayer->firings[spike][0] =" << SourceLayer->firings[spike][0] << "\n";
        cout << "SourceLayer->N_firings=" << SourceLayer->N_firings << "\n";
        cout << "SourceLayer->last_N_firings=" << SourceLayer->last_N_firings << "\n";
    }

    // update presynaptic learning potential (for LTP)
    while (spike < SourceLayer->N_firings) LPpre[SourceLayer->firings[spike++][1]][t+Dmax] += LearnRate;

    // update weights for each postsynaptic spike of the last time step
    spike = TargetLayer->last_N_firings;
    // cout << "ltp ";fflush(stdout);
    while (spike < TargetLayer->N_firings)
    {
        i = TargetLayer->firings[spike++][1];
        LPpost[i] += 1; // update postsynaptic trace
        // calculate LTP: this spike came after the pre-synaptic spikes
        for (j=0; j<(*PPreSynNr)[i].size(); j++) {
            T_NSynapses SynNr = (*PPreSynNr)[i][j];
            (*PSynWeights)[SynNr] += LPpost[i]*(LPpre[(*PSynSourceNr)[SynNr]][t+Dmax-(*PSynDelays)[SynNr]-1] - BaseLine);
            if ((*PSynWeights)[SynNr] > maxWeight) (*PSynWeights)[SynNr] = maxWeight;
            if ((*PSynWeights)[SynNr] < minWeight) (*PSynWeights)[SynNr] = minWeight;
        }
    }

    // decrease synaptic potentials
    for (i=0; i<ns; ++i) LPpre[i][t+Dmax+1] = LPpre[i][t+Dmax] * dec_pre;
    for (i=0; i<nt; ++i) LPpost[i] *= dec_post;
    return 0;
}
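// Stand-alone sketch (hypothetical helper) of the pre/post rule implemented
// above: dw/dt = LearnRate*(LPpre - Lbaseline)*LPpost. Since the presynaptic
// trace is incremented by LearnRate instead of 1, the LearnRate factor is
// already folded into preTrace, and only the pre-scaled baseline
// (Lbaseline*LearnRate) is subtracted in the inner loop.
static inline float HebbLP3UpdateSketch(float w, float preTrace, float postTrace,
                                        float scaledBaseLine, float minW, float maxW)
{
    w += postTrace * (preTrace - scaledBaseLine);
    if (w > maxW) w = maxW;
    if (w < minW) w = minW;
    return w;
}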
/////////////////////////////////////////////////////////