- /*Copyright (C) 2005, 2006, 2007 Frank Michler, Philipps-University Marburg, Germany
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- as published by the Free Software Foundation; either version 2
- of the License, or (at your option) any later version.
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
- #include "sys.hpp" // for libcwd
- #include "debug.hpp" // for libcwd
- #include "vnormalize.hpp"
- ////////Normalize//////////////////////////////////
// Construct an empty normalization object: no target layer is known yet
// (Target == 0) and no source synapses are counted; connections are
// registered later via AddConnection().
VecNormalize::VecNormalize()
  : AbstractNormalize(), Target(0), NTarget(0), NSource(0)
{
}
- int VecNormalize::AddConnection(VecConnection* newcon)
- {
- if (Target == 0)
- {
- Target = newcon->GetTargetLayer();
- NTarget = Target->N;
- }
- if (Target == newcon->GetTargetLayer()) ConList.push_back(newcon);
- else {
- cerr << "ERROR: Target-Layer not the same\n";
- exit (1);
- }
- NSource += newcon->GetNSource();
- if (RewiringOn) {
- newcon->SetRewiringOff(); // if VecNormalize object does the rewiring the connection object should't do ot too!!
- }
- }
- int VecNormalize::WriteSimInfo(fstream &fw)
- {
- stringstream sstr;
- sstr << "<Target id=\"" << Target->IdNumber << "\"/> \n";
- SimElement::WriteSimInfo(fw, sstr.str());
- }
- int VecNormalize::WriteSimInfo(fstream &fw, const string &ChildInfo)
- {
- stringstream sstr;
- sstr << "<Target id=\"" << Target->IdNumber << "\"/> \n";
- sstr << "<RewiringOn value=\"" << RewiringOn << "\"/> \n";
- sstr << "<IncommingConnectivity value=\"" << IncommingConnectivity << "\"/> \n";
- sstr << "<SynDelThreshold value=\"" << SynDelThreshold << "\"/> \n";
- sstr << "<InitialWeights value=\"" << InitialWeights << "\"/>\n";
- sstr << ChildInfo;
- SimElement::WriteSimInfo(fw, sstr.str());
- }
- void VecNormalize::SetRewiring(float _SynDelThreshold, float _IncommingConnectivity, float _InitialWeights)
- {
- RewiringOn=true;
- IncommingConnectivity=_IncommingConnectivity;
- SynDelThreshold=_SynDelThreshold;
- InitialWeights=_InitialWeights;
- // turn off rewiring in VecConnection objects,
- // because now the normalization object handles the rewiring
- for(vector<VecConnection*>::iterator it=ConList.begin(); it !=ConList.end(); ++it)
- {
- (*it)->SetRewiringOff();
- }
- }
- int VecNormalize::Rewire()
- {
- int tar=0;
- int NCon=ConList.size();
- // delete low weights
- int NDeletedSynapses=0;
- for(vector<VecConnection*>::iterator it=ConList.begin(); it !=ConList.end(); ++it)
- {
- NDeletedSynapses += (*it)->DeleteLowWeights(SynDelThreshold);
- }
- cout << "VecNormalize::Rewire() TotalDeletedSynapses=" << NDeletedSynapses << "\n";
-
- int NMaxWeights = int(round(IncommingConnectivity*NSource));
- vector<int> NNewWeights(NTarget);
- for (tar=0;tar<NTarget;++tar) {
- NNewWeights[tar] = NMaxWeights;
- }
- vector<vector<int> > NFreeWeights(NCon,vector<int>(NTarget));
- vector<vector<int> > NSglConNewWeights(NCon,vector<int>(NTarget));
-
- // count incomming weights
- vector<int> TotalFreeWeights(NTarget);
- for (int ConNr=0;ConNr<NCon;++ConNr) {
- int CurNs=(ConList[ConNr])->ns;
- for (int tar=0;tar<NTarget;++tar) {
- int PreSynSize = ((ConList[ConNr])->PreSynNr[tar]).size();
- NNewWeights[tar] -= PreSynSize;
- NFreeWeights[ConNr][tar] = CurNs-PreSynSize;
- TotalFreeWeights[tar] += NFreeWeights[ConNr][tar];
- }
- }
-
- if (NDeletedSynapses >0) {
- for (tar=0;tar<NTarget;++tar) {
- if (NNewWeights[tar]>0) {
- cout << "NNewWeights["<<tar<<"]=" << NNewWeights[tar] << "\n";
- cout << "TotalFreeWeights[" <<tar << "]=" << TotalFreeWeights[tar] << "\n";
- }
- }
- }
-
- // NNewWeights auf connections aufteilen
- for (tar=0;tar<NTarget;++tar) {
- if (NNewWeights[tar] >0) {
- float CurNewWeights = float(NNewWeights[tar])/TotalFreeWeights[tar];
- for(int ConNr=0;ConNr<NCon;++ConNr) {
- NSglConNewWeights[ConNr][tar] = int(CurNewWeights*NFreeWeights[ConNr][tar]);
- NNewWeights[tar] -= NSglConNewWeights[ConNr][tar];
- }
-
- if (NNewWeights[tar] >= NCon) {
- cerr << "fatal ERROR: NNewWeights[tar] should be less than NConnections! (exiting)\n";
- exit(1);
- }
- while (NNewWeights[tar]>0) {
- int Winner = gsl_rng_uniform_int(gslr, NCon);
- ++NSglConNewWeights[Winner][tar];
- --NNewWeights[tar];
- }
- }
- }
-
- // setting new weights
- for (int ConNr=0;ConNr<NCon;++ConNr) {
- ConList[ConNr]->SetNewWeights(&(NSglConNewWeights[ConNr]), InitialWeights);
- }
- }
- int VecNormalize::prepare(int Step)
- {
- if (RewiringOn) Rewire();
- }
- //////////////////////////////
/*! \brief normalizing synaptic weights if firing rates are above a threshold
- NormFrequency: above this spike frequency normalization occurs
- Weights are multiplied with (1-NormFactor)
- if spike frequency is higher then weight reduction is larger
- maximum: (1-NormFactor*MaxNormFactor)
- a look up table is used to determine the current normalization factor,
- depending on the current spike frequency (time difference DeltaT between current spike and last spike):
- NormLut(DeltaT)=1-MaxNormFactor*NormFactor*exp(-DeltaT/Tau),
- weight is multiplied with NormLut(DeltaT)
- @param _NormFrequency threshold frequency, above this frequency normalization occurs
- @param _NormFactor weight normalization factor,
if postsynaptic neuron fires with _NormFrequency, synaptic weight is multiplied with 1-_NormFactor
- @param _MaxNormFactor maximal normalization: (1-NormFactor*MaxNormFactor)
- If postsynaptic neuron fires with frequency higher than _NormFrequency, the normalization is stronger.
- For infinite firing rate normalization strength can raise up to _MaxNormFactor times.
- @author (fm)
- */
// Construct the firing-rate normalizer and precompute the normalization LUT.
// PostSynLastFirings is allocated lazily in AddConnection() once NTarget is known.
VecFiringRateNormalize2::VecFiringRateNormalize2(
  float _NormFrequency, float _NormFactor, float _MaxNormFactor)
  :PostSynLastFirings(0),
   MaxNormFactor(_MaxNormFactor),
   NormFactor(_NormFactor),
   NormFrequency(_NormFrequency)
{
  // inter-spike interval (in time steps) corresponding to NormFrequency;
  // spikes arriving faster than this trigger normalization
  NormDeltaT = 1000./(NormFrequency*dt);
  // decay constant of the LUT
  // NOTE(review): assumes MaxNormFactor > 1 so log() is positive — confirm
  Tau = NormDeltaT/log(MaxNormFactor);

  // look-up table: NormLut(DeltaT) = 1 - MaxNormFactor*NormFactor*exp(-DeltaT/Tau)
  NormLut = ExpDecayLut(NormLutN, Tau, -MaxNormFactor*NormFactor, 1, dt, NormDeltaT/Tau);
  cout << "VecFiringRateNormalization2\n";
  cout << "NormLut=" << "\n";
  for (int i=0;i<NormLutN;++i) cout << NormLut[i] << "\n";
  cout << " Factor=" << NormFactor << " MaxNormFactor=" << MaxNormFactor << " Tau=" << Tau*dt << " ms\n";
}
- int VecFiringRateNormalize2::WriteSimInfo(fstream &fw)
- {
- stringstream sstr;
- sstr << "<MaxNormFactor value=\"" << MaxNormFactor << "\"/>\n";
- sstr << "<NormFrequency value=\"" << NormFrequency << "\"/>\n";
- sstr << "<NormFactor value=\"" << NormFactor << "\"/>\n";
- VecNormalize::WriteSimInfo(fw, sstr.str());
- }
- int VecFiringRateNormalize2::AddConnection(VecConnection* newcon)
- {
- VecNormalize::AddConnection(newcon);
- if (PostSynLastFirings == 0)
- {
- PostSynLastFirings = new int [NTarget];
- int i;
- for (i=0;i<NTarget;++i)
- {
- PostSynLastFirings[i]=0;
- }
- }
- }
- int VecFiringRateNormalize2::proceede(int TotalTime)
- {
- int t = int(TotalTime % MacroTimeStep);
- int spike = Target->last_N_firings;
- int CurTarget;
- int i,j, TDiff;
- float NormFactor=1;
- while (spike < Target->N_firings) {
- CurTarget = Target->firings[spike][1];
- int TDiff = t-PostSynLastFirings[CurTarget];
- if (TDiff < NormLutN) {
- NormFactor = NormLut[TDiff];
- for(vector<VecConnection*>::iterator it=ConList.begin(); it !=ConList.end(); ++it)
- {
- (*it)->MultiplyTargetWeights(CurTarget, NormFactor);
- }
- }
- PostSynLastFirings[CurTarget]=t;
- ++spike;
- }
- }
- int VecFiringRateNormalize2::prepare(int Step)
- {
- VecNormalize::prepare(Step);
- int i;
- cout << "shifting lastspikes\n"; fflush(stdout);
- for (i=0;i<NTarget;++i) PostSynLastFirings[i] -= MacroTimeStep;
- //FixMe: what if neuron never fires?? prevent negative integer overflow??
- cout << "vnorm::prepared\n"; fflush(stdout);
- }
- //////////////////////////////
// Construct a constant-weight-sum normalizer.
// @param _WeightSum target sum of incoming weights per target neuron
// @param _quadratic if true, normalize the sum of squared weights
//                   (not implemented yet: proceede()/NormalizeAll() exit)
VecConstSumNormalize::VecConstSumNormalize(float _WeightSum, bool _quadratic)
  : WeightSum(_WeightSum), quadratic(_quadratic)
{
  cout << " VecConstSumNormalization\n";
  cout << " WeightSum=" << WeightSum << " quadratic=" << quadratic << " \n";
}
- int VecConstSumNormalize::prepare(int Step)
- {
- }
- int VecConstSumNormalize::proceede(int TotalTime)
- {
- int t = int(TotalTime % MacroTimeStep);
- int spike = Target->last_N_firings;
- int CurTarget;
- int i,j;
- float CurWeightSum, NormFactor;
- float tmpweight;
- if (quadratic)
- {
- cerr << " VecConstSumNormalize::proceede/QUADRATIC noch nicht implementiert}\n";
- exit(1);
- // while (spike < Target->N_firings) {
- // CurTarget = Target->firings[spike][1];
- // // calculate WeightSum
- // CurWeightSum=0;
- // for(vector<connection*>::iterator it=ConList.begin(); it !=ConList.end(); ++it)
- // {
- // for (i=0;i<(*it)->N_pre[CurTarget];++i) {
- // tmpweight= (*((*it)->s_pre[CurTarget][i]));
- // if (tmpweight <0) { // delete this thread
- // cout << "EEEEEEEEEERRRROOORRR, weight deletion didn't work\n";
- // fflush(stdout);
- // exit(2);
- // }
- // CurWeightSum += (*((*it)->s_pre[CurTarget][i]))*(*((*it)->s_pre[CurTarget][i]));
- // }
- // }
- // // DEBUG
- // if (CurWeightSum > 100) {
- // for(vector<connection*>::iterator it=ConList.begin(); it !=ConList.end(); ++it)
- // {
- // cout << "N_pre=" << (*it)->N_pre[CurTarget] << "\n";
- // for (i=0;i<(*it)->N_pre[CurTarget];++i) {
- // tmpweight= (*((*it)->s_pre[CurTarget][i]));
- // cout << "w"<< i << "=" << tmpweight << "I_pre=" << (*it)->I_pre[CurTarget][i] << "\n";
- // }
- // }
- // fflush(stdout);
- // exit(2);
- // }
- // // END DEBUG
- // NormFactor = WeightSum/sqrt(CurWeightSum);
- // cout << "NormFactor=" << NormFactor << "WeightSum" << CurWeightSum << "\n";
- // for(vector<connection*>::iterator it=ConList.begin(); it !=ConList.end(); ++it)
- // {
- // for (i=0;i<(*it)->N_pre[CurTarget];++i) *((*it)->s_pre[CurTarget][i]) *= NormFactor;
- // }
- // ++spike;
- // }
- } else {
- while (spike < Target->N_firings) {
- CurTarget = Target->firings[spike][1];
- // calculate WeightSum
- CurWeightSum=0;
- int Npre;
- for(TVecConnectionList::iterator it=ConList.begin(); it !=ConList.end(); ++it)
- {
- // Npre=((*it)->PreSynNr)[CurTarget].size();
- // for (i=0;i<Npre;++i) CurWeightSum += ((*it)->SynWeights) [((*it)->PreSynNr)[CurTarget][i]];
- CurWeightSum += (*it)->GetWeightSum(CurTarget, false);
- }
- NormFactor = WeightSum/CurWeightSum;
- // multiplicatively normalize weights
- for(TVecConnectionList::iterator it=ConList.begin(); it !=ConList.end(); ++it)
- {
- (*it)->MultiplyTargetWeights(CurTarget, NormFactor);
- }
- ++spike;
- }
-
- }
- }
- int VecConstSumNormalize::NormalizeAll()
- {
- cout << "Normalizing All ...";
- int CurTarget;
- int i,j;
- float CurWeightSum, NormFactor;
- if (quadratic)
- {
- cerr << " VecConstSumNormalize::proceede/QUADRATIC noch nicht implementiert}\n";
- exit(1);
- // for (CurTarget=0;CurTarget<Target->N;++CurTarget)
- // {
- // CurWeightSum=0; // wie oben
- // for(vector<connection*>::iterator it=ConList.begin(); it !=ConList.end(); ++it)
- // {
- // for (i=0;i<(*it)->N_pre[CurTarget];++i) CurWeightSum += (*((*it)->s_pre[CurTarget][i]))*(*((*it)->s_pre[CurTarget][i]));
- // }
- // NormFactor = WeightSum/sqrt(CurWeightSum);
- // for(vector<connection*>::iterator it=ConList.begin(); it !=ConList.end(); ++it)
- // {
- // for (i=0;i<(*it)->N_pre[CurTarget];++i) *((*it)->s_pre[CurTarget][i]) *= NormFactor;
- // }
- // }
- }
- else {
- for (CurTarget=0;CurTarget<Target->N;++CurTarget)
- {
- CurWeightSum=0;
- // calculate Weight Sum
- for(TVecConnectionList::iterator it=ConList.begin(); it !=ConList.end(); ++it)
- {
- CurWeightSum += (*it)->GetWeightSum(CurTarget, false);
- }
- NormFactor = WeightSum/CurWeightSum;
- // multiplicatively normalize weights
- for(TVecConnectionList::iterator it=ConList.begin(); it !=ConList.end(); ++it)
- {
- (*it)->MultiplyTargetWeights(CurTarget, NormFactor);
- }
- }
- }
- cout << " [done]\n";
- }
// ToDo: test this function
// sets the weight sum according to the average over all target neurons' weight sums
- void VecConstSumNormalize::CalcInitWeightSum()
- {
- cout << "Calculating Initial Weight Sum ...";
- cerr << "VecConstSumNormalize::CalcInitWeightSum() is not tested\n";
- exit(1);
- int CurTarget;
- int i,j;
- float CurWeightSum, NormFactor;
- float TmpWeightSum=0;
- if (quadratic)
- {
- cerr << " VecConstSumNormalize::proceede/QUADRATIC noch nicht implementiert}\n";
- exit(1);
- // for (CurTarget=0;CurTarget<Target->N;++CurTarget)
- // {
- // CurWeightSum=0; // wie oben
- // for(vector<connection*>::iterator it=ConList.begin(); it !=ConList.end(); ++it)
- // {
- // for (i=0;i<(*it)->N_pre[CurTarget];++i) CurWeightSum += (*((*it)->s_pre[CurTarget][i]))*(*((*it)->s_pre[CurTarget][i]));
- // }
- // TmpWeightSum += sqrt(CurWeightSum);
- // }
- // WeightSum = TmpWeightSum/Target->N;
- }
- else {
- for (CurTarget=0;CurTarget<Target->N;++CurTarget)
- {
- CurWeightSum=0;
- // calculate Weight Sum
- for(TVecConnectionList::iterator it=ConList.begin(); it !=ConList.end(); ++it)
- {
- CurWeightSum += (*it)->GetWeightSum(CurTarget, false);
- }
- TmpWeightSum += CurWeightSum;
- }
- WeightSum = TmpWeightSum/Target->N;
- }
- cout << " [done]\n";
- }
/*! \brief Returns the target weight sum used for normalization. */
float VecConstSumNormalize::GetWeightSum()
{
  return WeightSum;
}
- int VecConstSumNormalize::SetWeightSum(float NewWeightSum)
- {
- WeightSum=NewWeightSum;
- }
- int VecConstSumNormalize::WriteSimInfo(fstream &fw)
- {
- stringstream sstr;
- sstr << "<WeightSum Value=\"" << WeightSum << "\"/> \n";
- sstr << "<Quadratic Value=\"" << quadratic << "\"/> \n";
- VecNormalize::WriteSimInfo(fw, sstr.str());
- }
- //////////////////////////////
|