#include "sys.hpp" // for libcwd #include "debug.hpp" // for libcwd #include "connection.hpp" #include "learn.hpp" #include "layer.hpp" #include "simloop.hpp" #include //////////////////////////////////// Connection::Connection(): SourceLayer(0), TargetLayer(0), InputPointer(0), ArrayOrderXFast(true) { Dmax=DMAX; maximumDelay = Dmax; } Connection::Connection(layer* SL, layer* TL, csimInputChannel _InputChannel, bool _nonself) : SimElement(seConnection), SourceLayer(SL), TargetLayer(TL), connectivity(0.1), Strength(0), cpu_time_used(0), AutoSave(true), NonSelf(_nonself), RewireOn(false), learn(false), InputChannel(_InputChannel), ArrayOrderXFast(true) { Name="Connection"; WeightFileName ="weights.dat"; dt = TargetLayer->GetDt(); MacroTimeStep = TargetLayer->GetMacroTimeStep(); InputPointer = TargetLayer->GetInputPointer(InputChannel); Dmax=DMAX; maximumDelay = Dmax; ns = SourceLayer->N; nt = TargetLayer->N; SourceNx=SourceLayer->Nx; SourceNy=SourceLayer->Ny; TargetNx=TargetLayer->Nx; TargetNy=TargetLayer->Ny; Dout(dc::con, "Connection::Connection " << SourceLayer->Name << "->"<Name); Dout(dc::con, "Ntarget= " << TargetLayer->N << " Nsource= " << ns << "TimeStep=" << dt << "ms InputNumber=" << int(InputChannel)); } Connection::~Connection() { Dout(dc::con, "Connection Destructor"); } bool Connection::Learning() { return learn; } layer* Connection::GetTargetLayer() { return TargetLayer; } int Connection::SetAutoSave(bool a) { AutoSave = a; } void Connection::SetNonSelf(bool value) { NonSelf=value; } int Connection::SetRewiring(float threshold, float _con, int _nnewcon, float _InitialWeight) { RewireOn=true; RewireMaxConnectivity=_con; RewireNNewConnections=_nnewcon; RewireThreshold=threshold; if (_InitialWeight == -1) { InitialWeight = 2*RewireThreshold; } else { InitialWeight=_InitialWeight; } Dout(dc::con, "Set up Rewiring, thres=" << RewireThreshold << " connectivity=" << RewireMaxConnectivity << ""); } void Connection::SetRewiringOff() { RewireOn=false; } /** check consistency of file header data. 
/** Check consistency of file header data.
 *  Used on weight file loading.
 *
 *  @return true if consistent
 */
bool Connection::CheckHeaderConsistency()
{
  bool consistent = true;
  if (float(SourceNx)*float(SourceNy) > ns) {
    cerr << "ERROR: float(SourceNx)*float(SourceNy)>ns \n";
    consistent = false;
  }
  if (float(TargetNx)*float(TargetNy) > nt) {
    cerr << "ERROR: float(TargetNx)*float(TargetNy)>nt \n";
    consistent = false;
  }
  if (maximumDelay > Dmax) {
    cerr << "ERROR: maximumDelay>Dmax \n";
    consistent = false;
  }
  if (SourceLayer) { // consistency check if the connection is bound to a SourceLayer
    Dout(dc::con, "Check consistency with SourceLayer");
    if (ns != SourceLayer->N) {
      cerr << "ERROR: SourceN doesn't fit\n"; fflush(stderr);
      consistent = false;
    }
  }
  if (TargetLayer) { // consistency check if the connection is bound to a TargetLayer
    Dout(dc::con, "Check consistency with TargetLayer");
    if (nt != TargetLayer->N) {
      cerr << "ERROR: TargetN doesn't fit: nt=" << nt << "\n"; fflush(stderr);
      consistent = false;
    }
    if (maximumDelay > TargetLayer->Dmax) {
      cerr << "ERROR: maximumDelay > TargetDmax \n"; fflush(stderr);
      consistent = false;
    }
  }
  return consistent;
}

///////////////////////////////////////

connection::connection(): learnobj(0)
{
  Name = "connection";
  // initialize all pointers with 0
  post = 0;
  s = sd = 0;
  delays_length = 0;
  delays = 0;
  N_pre = 0;
  I_pre = D_pre = 0;
  s_pre = sd_pre = 0;
  N_post = 0;
}

connection::connection(layer* SL, layer* TL, csimInputChannel _InputChannel, bool _nonself)
  : Connection(SL, TL, _InputChannel, _nonself), learnobj(0)
{
  Name = "connection";
  // initialize all pointers with 0
  post = 0;
  s = sd = 0;
  delays_length = 0;
  delays = 0;
  N_pre = 0;
  I_pre = D_pre = 0;
  s_pre = sd_pre = 0;
  N_post = 0;
}

connection::~connection()
{
  Dout(dc::con, "connection Destructor");
  if (learnobj != 0) delete learnobj;
  DeleteDynamicalArrays();
  DeletePresynapticInfo();
}

int connection::SetLearn(bool l)
{
  if (learnobj != 0) learn = l;
  return 0;
}

int connection::SetLearnObj(learning* lo)
{
  learnobj = lo;
  SetLearn(true);
  return 0;
}

int connection::ObserveSynapse(int SourceNumber, int MNumber, char* RecName)
{
  StartRecorder(RecName);
  if (SourceNumber < ns) Observe_s = SourceNumber; else Observe_s = 0;
  if (MNumber < M) Observe_m = MNumber; else Observe_m = 0;
  return 0;
}

int connection::ObserveSynapsePrePost(int SourceNumber, int TargetNumber, char* RecName)
{
  int TargetM = 0;
  /* [the rest of this function was lost in extraction; it scans
     post[SourceNumber][TargetM] for TargetNumber and then observes the
     matching synapse. The tail of a following recorder-setup routine
     survives:] */
    if (NumPreSyn > 0) {
      Dout(dc::con, "NumPreSyn=" << NumPreSyn);
      float** Buffer = new float* [NumPreSyn];
      for (int i=0; i<NumPreSyn; ++i) { /* [buffer wiring lost] */ }
    }
  /* [end of lost span] */
}

int connection::WriteSimInfo(fstream &fw)
{
  stringstream sstr;
  // [the XML tag names in the following literals were lost in extraction;
  //  only the attribute values survive]
  sstr << "<"/*[tag lost]*/ << SourceLayer->IdNumber << "\"/> \n";
  sstr << "<"/*[tag lost]*/ << TargetLayer->IdNumber << "\"/> \n";
  sstr << /*[tag lost]*/ " \n";
  sstr << /*[tag lost]*/ " \n";
  sstr << /*[tag lost]*/ " \n";
  if (learnobj) sstr << learnobj->GetSimInfo();
  SimElement::WriteSimInfo(fw, sstr.str());
  return 0;
}

int connection::WriteSimInfo(fstream &fw, const string &ChildInfo)
{
  stringstream sstr;
  sstr << "<"/*[tag lost]*/ << SourceLayer->IdNumber << "\"/> \n";
  sstr << "<"/*[tag lost]*/ << TargetLayer->IdNumber << "\"/> \n";
  sstr << ChildInfo;
  sstr << /*[tag lost]*/ " \n";
  sstr << /*[tag lost]*/ " \n";
  sstr << /*[tag lost]*/ " \n";
  if (learnobj) sstr << learnobj->GetSimInfo();
  SimElement::WriteSimInfo(fw, sstr.str());
  return 0;
}

ConnectionInfo connection::GetConnectionInfo()
{
  ConnectionInfo info;
  info.Dmax = Dmax;
  info.maximumDelay = maximumDelay;
  info.MaxWeight = maxWeight;
  info.TargetLayer = TargetLayer;
  info.SourceLayer = SourceLayer;
  info.M = M;
  info.WeightPointer = s;
  info.WeightDerivativePointer = sd;
  info.delays_length = delays_length;
  info.delays = delays;
  info.post = post;
  info.maxN_pre = maxN_pre;
  info.I_pre = I_pre;
  info.N_pre = N_pre;
  info.s_pre = s_pre;
  info.sd_pre = sd_pre;
  info.D_pre = D_pre;
  info.m_pre = m_pre;
  return info;
}
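/* Usage sketch (added; hypothetical): GetConnectionInfo() hands the raw
   synapse arrays to learning rules. In the forward view, source neuron src
   has M synapse slots; post[src][m] is the target index (-1 meaning "not
   connected", see InitializeDynamicalArrays below) and WeightPointer[src][m]
   the weight:

     ConnectionInfo info = con->GetConnectionInfo();
     for (int m=0; m<info.M; ++m)
       if (info.post[src][m] >= 0)
         Dout(dc::con, "w(" << src << "->" << info.post[src][m]
                       << ")=" << info.WeightPointer[src][m]);
*/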
dynamical arrays cout<<"initialize dynamical begins"; MainSimLoop->SetMaximumDelay(_Dmax); // indeces of postsynaptic neurons NewArray2d(post, _N, _M); for (int i=0;i<_N;++i) for (int j=0;j<_M;++j) post[i][j]=-1; // initialize with -1 (not connected) // matrix of synaptic weights NewArray2d(s, _N,_M); // NewArray2d(sd,_N,_M); weight derivatives, not used yet NewArray2d(delays_length, _N, _Dmax); // distribution of delays // initialize with zero for (int i=0;i<_N;++i) for (int j=0;j<_Dmax;++j) delays_length[i][j] = 0; NewArray3d(delays, _N, _Dmax, _M); // arrangement of delays Dout(dc::con,"initialize dynamical ends"); } int connection::DeleteDynamicalArrays() { if(post!=0) DeleteArray2d(post,ns); if(s!=0) DeleteArray2d(s,ns); if(sd!=0) DeleteArray2d(sd,ns); if(delays_length!=0) DeleteArray2d(delays_length,ns); if(delays!=0) DeleteArray3d(delays,ns,maximumDelay); } int connection::ConnectRandom(float c, float InitialWeights, float maxDelay, float minDelay, bool RandomDelays) { Strength=InitialWeights; maxWeight=1; connectivity=c; nt = TargetLayer->N; // float TAULEARN_pre=50; // dec_pre = exp(-1./TAULEARN_pre); // float TAULEARN_post=5; // dec_pre = exp(-1./TAULEARN_post); int N; ns = N = SourceLayer->N; if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers with same size M= (int) floor(c*nt); maxN_pre = 3*ns*M/nt; if (maxN_pre >= ns) maxN_pre = ns; // because there are no double synapses Dout(dc::con, "M= " << M << " c= " << c << " ns= " << ns << "maxN_pre=" << maxN_pre); if (M>nt) Dout(dc::con, "ERROR: too many connections, reduce connectivity!!"); SetMinMaxDelay(maxDelay, minDelay); // initialize dynamical arrays InitializeDynamicalArrays(N, M, maximumDelay); int i,j,k,exists, r; Dout(dc::con, "connect"); for (i=0;iN; int NConnections = int(round(float(ns)*_Connectivity)); ConnectRandom2(NConnections, InitialWeights, maxDelay, minDelay, RandomDelays); } else { cerr << "ERROR in connection::ConnectRandom2: _Connectivity should be [0..1]\n"; } } int connection::ConnectRandom2(int NIncommingConnections, float InitialWeights, float maxDelay, float minDelay, bool RandomDelays) { // random connections, but connectivity from postsynaptic perspective // every postsynaptic neuron has the same number of incomming connections // source neurons have nearly the same number of outgoing connections (plusminus 1) Dout(dc::con, " connection::ConnectRandom2"); Strength=InitialWeights; maxWeight=1; nt = TargetLayer->N; int N; ns = N = SourceLayer->N; if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers with same size int NConnections = NIncommingConnections*nt; connectivity = float(NIncommingConnections)/ns; float c = connectivity; // remove this M= (int) ceil(float(NConnections)/ns); bool MultipleSynapses=false; maxN_pre = NIncommingConnections; if (maxN_pre >= ns) { cerr << "ERROR in connection::ConnectRandom2: more incomming connections than source neurons\n"; MultipleSynapses = true; } Dout(dc::con, "M= " << M << " c= " << connectivity << " ns= " << ns << "maxN_pre=" << maxN_pre); Dout(dc::con, "NConnections=" << NConnections << "nt=" << nt << "ns=" << ns << "NIncommingConnections=" << NIncommingConnections << ""); if (M>nt) cerr << "ERROR: too many connections, reduce connectivity!!\n"; SetMinMaxDelay(maxDelay, minDelay); // initialize dynamical arrays InitializeDynamicalArrays(N, M, maximumDelay); for (int i=0;i > TmpPreI(nt); vector TmpNPost(ns); vector ConnectionPool(ns*M); for (i=0;i::iterator it=TmpPreI[itarget].begin();it!=TmpPreI[itarget].end();++it) { if 
((*it)==isource) { exists = 1; // synapse already exists // cerr << "synapse allready exists\n"; } } ++Try; } while ((exists == 1) && (TryN; // float TAULEARN_pre=50; // dec_pre = exp(-1./TAULEARN_pre); // float TAULEARN_post=5; // dec_pre = exp(-1./TAULEARN_post); int N; ns = N = SourceLayer->N; if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers with same size int MaxTarget = (_MaxTarget= ns) maxN_pre = ns; // because there are no double synapses Dout(dc::con, "M= " << M << " c= " << c << " ns= " << ns << "maxN_pre=" << maxN_pre); if (M>nt) Dout(dc::con, "ERROR: too many connections, reduce connectivity!!"); SetMinMaxDelay(maxDelay, minDelay); // initialize dynamical arrays InitializeDynamicalArrays(N, M, maximumDelay); int i,j,k,exists, r; Dout(dc::con, "connect"); for (i=0;iN; int ConnectionCount=0; int N; ns = N = SourceLayer->N; if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers with same size int i,j,k,exists, r; // Dout(dc::con, "connect"); // double loop (ns, nt) goes throug every possible synapse and checks wether to connect or not int TargetCount; M=0; // typedef boost::multi_array int2d_array; // typedef int2d_array::index index; boost::multi_array TempPost(boost::extents[ns][nt]); boost::multi_array TempDistance(boost::extents[ns][nt]); for (i=0;i TempNpre (nt); for (i=0;iPos[j]).CyclicDistance(SourceLayer->Pos[i], basis); else Distance = (TargetLayer->Pos[j] - SourceLayer->Pos[i]).abs() ; if ((Distance < MaxConDistance) && (!NonSelf || (i!=j))) { TempDistance[i][TargetCount] = Distance; TempPost[i][TargetCount]=j; ++TempNpre[j]; // count presynaptic Targeting of postsynaptic neuron ++TargetCount; ++ConnectionCount; } } if (TargetCount > M) M=TargetCount; } for (j=0;j maxN_pre) maxN_pre = TempNpre[j]; Dout(dc::con, "M= " << M << " ns= " << ns << "maxN_pre=" << maxN_pre); Dout(dc::con, "ConnectionCount=" << ConnectionCount); // initialize dynamical arrays InitializeDynamicalArrays(N, M, maximumDelay); // copy temporary TempPost to post array for (i=0;i=maximumDelay) { CurDelay = maximumDelay; Dout(dc::con, "ERROR: delay too high"); } delays[i][CurDelay][delays_length[i][CurDelay]] = j; ++delays_length[i][CurDelay]; } } } Dout(dc::con, "connected"); SetupPresynapticInfo(); Dout(dc::con, "Connection initialized"); } int connection::ConnectGaussianColumnwise(float Sigma, float MaxWeight, int dimx, int dimy, bool Cyclic,bool Shifted, float maxDelay, float minDelay,bool divergent,bool notstraight) { Strength=MaxWeight; int shiftitornot=0; if (Shifted==true) shiftitornot=1; Dout(dc::con, "Connection::ConnectGaussianColumnwise"); nt = TargetLayer->N; int ConnectionCount=0; int N; ns = N = SourceLayer->N; if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers with same size float ntns; if (divergent = true) ntns=(nt/dimy)/ns; else ntns=1; float dimxdimy=dimx/dimy; int i,x,y,j,exists, r; // Dout(dc::con, "connect"); fflush(stdout); // double loop (ns, nt) goes throug every possible synapse and checks wether to connect or not int TargetCount; M=0; // typedef boost::multi_array int2d_array; // typedef int2d_array::index index; boost::multi_array TempPost(boost::extents[ns][nt]); boost::multi_array TempDistance(boost::extents[ns][nt]); for (i=0;i TempNpre (nt); for (i=0;iPos[(Dist_help%dimx_help)/1000].CyclicDistance(SourceLayer->Pos[i], basis)); else Distance = (TargetLayer->Pos[x] - SourceLayer->Pos[i]).abs(); if ((Distance < MaxConDistance) && (!NonSelf || (i!=synapse_number))) { TempDistance[i][TargetCount] = Distance; 
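/* Note (added): the Connect*Gaussian* routines below weight or accept synapses
   with an (unnormalized) Gaussian kernel of the source-target distance d, as
   written out explicitly further down
   (MaxWeight*exp(-0.5*Distance*Distance/(sigma*sigma))). A minimal sketch,
   assuming this is also what the gauss() helper used by ConnectGaussianProb
   computes:

     float gauss(float d, float sigma) {
       return exp(-0.5f*d*d/(sigma*sigma)); // peak 1 at d=0; no 1/(sigma*sqrt(2*pi)) normalization
     }

   With Sigma=0.1, a target at distance 0.1 gets exp(-0.5) ~ 0.61 of the peak
   value. */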
int connection::ConnectGaussianColumnwise(float Sigma, float MaxWeight, int dimx, int dimy,
                                          bool Cyclic, bool Shifted, float maxDelay, float minDelay,
                                          bool divergent, bool notstraight)
{
  Strength = MaxWeight;
  int shiftitornot = 0;
  if (Shifted == true) shiftitornot = 1;
  Dout(dc::con, "Connection::ConnectGaussianColumnwise");
  nt = TargetLayer->N;
  int ConnectionCount = 0;
  int N;
  ns = N = SourceLayer->N;
  if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers of equal size
  float ntns;
  if (divergent == true) ntns = (nt/dimy)/ns; // fixed: was "divergent = true" (assignment instead of comparison)
  else ntns = 1;
  float dimxdimy = dimx/dimy;
  int i, x, y, j, exists, r;
  // Dout(dc::con, "connect");
  fflush(stdout);
  // double loop (ns, nt) goes through every possible synapse and checks whether to connect or not
  int TargetCount;
  M = 0;
  boost::multi_array<int,2> TempPost(boost::extents[ns][nt]);     // [template args reconstructed]
  boost::multi_array<float,2> TempDistance(boost::extents[ns][nt]);
  vector<int> TempNpre(nt);
  /* [the column/synapse index arithmetic is largely lost in extraction; the
     surviving core of the distance test:] */
      if (Cyclic)
        Distance = (TargetLayer->Pos[(Dist_help%dimx_help)/1000].CyclicDistance(SourceLayer->Pos[i], basis));
      else
        Distance = (TargetLayer->Pos[x] - SourceLayer->Pos[i]).abs();
      if ((Distance < MaxConDistance) && (!NonSelf || (i != synapse_number))) {
        TempDistance[i][TargetCount] = Distance;
        TempPost[i][TargetCount] = synapse_number;
        // TempPost2[i][TargetCount][0]=k; TempPost2[i][TargetCount][1]=j;
        ++TempNpre[synapse_number]; // count presynaptic targeting of the postsynaptic neuron
        ++TargetCount;
        ++ConnectionCount;
      }
  /* [end of surviving fragment] */
    if (TargetCount > M) M = TargetCount;
  for (j=0; j<nt; ++j)
    if (TempNpre[j] > maxN_pre) maxN_pre = TempNpre[j];
  Dout(dc::con, "M=" << M << " ns=" << ns << " maxN_pre=" << maxN_pre);
  Dout(dc::con, "ConnectionCount=" << ConnectionCount);
  // initialize dynamical arrays
  InitializeDynamicalArrays(N, M, maximumDelay);
  // copy temporary TempPost to post array
  for (i=0; i<ns; ++i) {
    /* [loop head lost; per stored synapse it sets post, weight and delay:] */
      if (CurDelay >= maximumDelay) {
        CurDelay = maximumDelay-1; // fixed: was maximumDelay (out of range)
        cerr << "ERROR: delay too high\n";
      }
      delays[i][CurDelay][delays_length[i][CurDelay]] = synapse_number;
      ++delays_length[i][CurDelay];
  }
  Dout(dc::con, "connected");
  SetupPresynapticInfo();
  Dout(dc::con, "Connection initialized");
}

int connection::ConnectIdenticalGaussian(float Sigma, float MaxWeight, float maxDelay, float minDelay,
                                         float mpx, float mpy)
{
  Strength = MaxWeight;
  Dout(dc::con, "Connection::ConnectIdenticalGaussian");
  nt = TargetLayer->N;
  int ConnectionCount = 0;
  int N;
  ns = N = SourceLayer->N;
  if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers of equal size
  int i, j, k, exists, r;
  // Dout(dc::con, "connect");
  fflush(stdout);
  // double loop (ns, nt) goes through every possible synapse and checks whether to connect or not
  int TargetCount;
  M = 0;
  boost::multi_array<int,2> TempPost(boost::extents[ns][nt]);     // [template args reconstructed]
  boost::multi_array<float,2> TempDistance(boost::extents[ns][nt]);
  vector<int> TempNpre(nt);
  /* [initialization lost; (mpx,mpy) presumably defines the constant reference
     point ConstMP used below] */
  for (i=0; i<ns; ++i) {
    TargetCount = 0; // [reconstructed]
    Distance = (SourceLayer->Pos[i] - ConstMP).abs();
    for (j=0; j<nt; ++j) {
      /* [cyclic branch partly lost:] */
        Distance = (TargetLayer->Pos[j]).CyclicDistance(SourceLayer->Pos[i], basis);
      if ((Distance < MaxConDistance) && (!NonSelf || (i != j))) {
        TempDistance[i][TargetCount] = Distance;
        TempPost[i][TargetCount] = j;
        ++TempNpre[j]; // count presynaptic targeting of the postsynaptic neuron
        ++TargetCount;
        ++ConnectionCount;
      }
    }
    if (TargetCount > M) M = TargetCount;
  }
  for (j=0; j<nt; ++j)
    if (TempNpre[j] > maxN_pre) maxN_pre = TempNpre[j];
  Dout(dc::con, "M=" << M << " ns=" << ns << " maxN_pre=" << maxN_pre);
  Dout(dc::con, "ConnectionCount=" << ConnectionCount);
  // initialize dynamical arrays
  InitializeDynamicalArrays(N, M, maximumDelay);
  // copy temporary TempPost to post array
  for (i=0; i<ns; ++i) {
    /* [loop head lost:] */
      if (CurDelay >= maximumDelay) {
        CurDelay = maximumDelay-1; // fixed: was maximumDelay (out of range)
        Dout(dc::con, "ERROR: delay too high");
      }
      delays[i][CurDelay][delays_length[i][CurDelay]] = j;
      ++delays_length[i][CurDelay];
  }
  Dout(dc::con, "connected");
  SetupPresynapticInfo();
  Dout(dc::con, "Connection initialized");
}
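/* ConnectProfile (below) delegates the distance dependence to a
   DistanceProfile object, of which only the two methods used there are known:
   GetValue(d) and GetMaxConDistance(eps). A minimal sketch of such a profile
   (hypothetical subclass; the real class hierarchy is defined elsewhere):

     struct GaussProfile : public DistanceProfile {
       float sigma;
       GaussProfile(float s) : sigma(s) {}
       float GetValue(float d)            { return exp(-0.5f*d*d/(sigma*sigma)); }
       float GetMaxConDistance(float eps) { return sigma*sqrt(-2.0f*log(eps)); } // solves GetValue(d)==eps
     };

   GetMaxConDistance(0.05) is then the radius beyond which the profile drops
   below 5% of its peak; ConnectProfile uses it as the connection cutoff. */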
// Connect according to a profile function that is defined on distance
int connection::ConnectProfile(DistanceProfile* profile, float MaxWeight, float maxDelay, float minDelay, bool Cyclic)
{
  Strength = MaxWeight;
  Dout(dc::con, "Connection::ConnectProfile");
  nt = TargetLayer->N;
  int ConnectionCount = 0;
  int N;
  ns = N = SourceLayer->N;
  if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers of equal size
  int i, j, k, exists, r;
  // Dout(dc::con, "connect");
  fflush(stdout);
  // double loop (ns, nt) goes through every possible synapse and checks whether to connect or not
  int TargetCount;
  M = 0;
  Dout(dc::con, "MakeFirstTempArray, ns=" << ns << " nt=" << nt);
  // boost::multi_array<int,2> TempPost(boost::extents[ns][nt]);
  int** TempPost;
  NewArray2d(TempPost, ns, nt);
  Dout(dc::con, "MakeSecondTempArray");
  boost::multi_array<float,2> TempDistance(boost::extents[ns][nt]); // [template args reconstructed]
  vector<int> TempNpre(nt);
  /* [initialization loops and declarations (Distance, basis, CurDelay) partly lost] */
  float MaxConDistance = profile->GetMaxConDistance(0.05);
  float ConValue;
  for (i=0; i<ns; ++i) {
    TargetCount = 0; // [loop heads reconstructed from the surviving pattern]
    for (j=0; j<nt; ++j) {
      if (Cyclic) Distance = (TargetLayer->Pos[j]).CyclicDistance(SourceLayer->Pos[i], basis);
      else Distance = (TargetLayer->Pos[j] - SourceLayer->Pos[i]).abs();
      if ((Distance < MaxConDistance) && (!NonSelf || (i != j))) {
        TempDistance[i][TargetCount] = Distance;
        TempPost[i][TargetCount] = j;
        ++TempNpre[j]; // count presynaptic targeting of the postsynaptic neuron
        ++TargetCount;
        ++ConnectionCount;
      }
    }
    if (TargetCount > M) M = TargetCount;
  }
  for (j=0; j<nt; ++j)
    if (TempNpre[j] > maxN_pre) maxN_pre = TempNpre[j];
  Dout(dc::con, "M=" << M << " ns=" << ns << " maxN_pre=" << maxN_pre); fflush(stdout);
  Dout(dc::con, "ConnectionCount=" << ConnectionCount);
  // initialize dynamical arrays
  InitializeDynamicalArrays(N, M, maximumDelay);
  // copy temporary TempPost to post array
  for (i=0; i<ns; ++i) {
    /* [loop head lost; per synapse the weight comes from the profile:] */
      ConValue = profile->GetValue(TempDistance[i][j]); /* [how ConValue is written into s was lost] */
      // CurDelay = minimumDelay + gsl_rng_uniform_int(gslr, DelayDiff+1);
      CurDelay = minimumDelay + int(TempDistance[i][j]*DelayDiff/MaxConDistance); // ToDo: FixMe! think a lot!
      if (CurDelay >= maximumDelay) {
        CurDelay = maximumDelay-1;
        Dout(dc::con, "ERROR: delay too high");
      }
      delays[i][CurDelay][delays_length[i][CurDelay]] = j;
      ++delays_length[i][CurDelay];
  }
  Dout(dc::con, "connected"); fflush(stdout);
  SetupPresynapticInfo();
  Dout(dc::con, "Connection initialized");
}

/////////
// Connect with a given weight matrix
int connection::ConnectMatrix(const vector<vector<float> > &WeightMatrix, float maxDelay, float minDelay)
{
  // Strength=MaxWeight;
  Dout(dc::con, "Connection::ConnectMatrix");
  nt = TargetLayer->N;
  int ConnectionCount = 0;
  int N;
  ns = N = SourceLayer->N;
  if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers of equal size
  int i, j, k, exists, r;
  // Dout(dc::con, "connect");
  fflush(stdout);
  // double loop (ns, nt) goes through every possible synapse and checks whether to connect or not
  int TargetCount;
  M = 0;
  Dout(dc::con, "MakeFirstTempArray, ns=" << ns << " nt=" << nt); fflush(stdout);
  // boost::multi_array<int,2> TempPost(boost::extents[ns][nt]);
  int** TempPost;
  NewArray2d(TempPost, ns, nt);
  Dout(dc::con, "MakeSecondTempArray"); fflush(stdout);
  boost::multi_array<float,2> TempDistance(boost::extents[ns][nt]);
  vector<int> TempNpre(nt);
  /* [the selection loop is largely lost; it scans WeightMatrix and stores each
     accepted entry as a candidate synapse (the accept condition itself was
     lost), tracking TargetCount per source and ending with:] */
    if (TargetCount > M) M = TargetCount;
  for (j=0; j<nt; ++j)
    if (TempNpre[j] > maxN_pre) maxN_pre = TempNpre[j];
  Dout(dc::con, "M=" << M << " ns=" << ns << " maxN_pre=" << maxN_pre); fflush(stdout);
  Dout(dc::con, "ConnectionCount=" << ConnectionCount);
  // initialize dynamical arrays
  InitializeDynamicalArrays(N, M, maximumDelay);
  // copy temporary TempPost to post array
  for (i=0; i<ns; ++i) {
    /* [loop head lost; weights are taken from WeightMatrix, the delay per
       synapse is range-checked:] */
      if (CurDelay >= maximumDelay) {
        CurDelay = maximumDelay-1;
        Dout(dc::con, "ERROR: delay too high");
      }
      delays[i][CurDelay][delays_length[i][CurDelay]] = j;
      ++delays_length[i][CurDelay];
  }
  Dout(dc::con, "connected"); fflush(stdout);
  SetupPresynapticInfo();
  Dout(dc::con, "Connection initialized");
}
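/* Usage sketch for ConnectMatrix above (added; values hypothetical): build an
   explicit ns x nt weight matrix and hand it over. The exact accept condition
   for an entry was lost in extraction, so treat "only set the synapses you
   want" as an assumption.

     vector<vector<float> > W(ns, vector<float>(nt, 0.0f));
     W[0][3] = 0.5f;               // one synapse: source 0 -> target 3
     con->ConnectMatrix(W, 5, 1);  // delay bounds, interpreted via SetMinMaxDelay
*/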
//////////
// circular connections, constant number of postsynaptic connections
int connection::ConnectCircular(float PreConnectivity, float MaxWeight, float maxDelay, float minDelay)
{
  Strength = MaxWeight;
  Dout(dc::con, "Connection::ConnectCircular"); // fixed: debug message said "ConnectProfile"
  nt = TargetLayer->N;
  if (PreConnectivity > 1) PreConnectivity = 1;
  if (PreConnectivity < 0) Dout(dc::con, "connection::ConnectCircular: ERROR: PreConnectivity shouldn't be negative!!");
  int NTargetConnections = int(PreConnectivity*nt);
  int ConnectionCount = 0;
  int N;
  ns = N = SourceLayer->N;
  if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers of equal size
  int i, j, k, exists, r;
  // Dout(dc::con, "connect");
  fflush(stdout);
  // double loop (ns, nt) goes through every possible synapse and checks whether to connect or not
  int TargetCount;
  M = 0;
  Dout(dc::con, "MakeFirstTempArray, ns=" << ns << " nt=" << nt); fflush(stdout);
  // boost::multi_array<int,2> TempPost(boost::extents[ns][nt]);
  int** TempPost;
  NewArray2d(TempPost, ns, nt);
  Dout(dc::con, "MakeSecondTempArray"); fflush(stdout);
  boost::multi_array<float,2> TempDistance(boost::extents[ns][nt]);
  vector<int> TempNpre(nt);
  /* [some initialization lost: InitialWeights, CurDelay, MaxConDistance=0,
     SetMinMaxDelay, maxN_pre=0, Distance, basis, progress bar] */
  // for each presynaptic neuron calculate a sorted distance list, then connect
  // until NTargetConnections is reached
  // typedef map<float,int> DistMap;   [template args reconstructed]
  map<float,int> CurPostDistances;
  map<float,int>::iterator CurDist = CurPostDistances.begin();
  for (i=0; i<ns; ++i) {
    TargetCount = 0;
    CurPostDistances.clear(); // [reconstructed]
    for (j=0; j<nt; ++j) {
      Distance = (TargetLayer->Pos[j] - SourceLayer->Pos[i]).abs();
      CurPostDistances[Distance+0.000001*j] = j; // add a minimal offset, because each key must be unique (map has no duplicate keys)
    }
    CurDist = CurPostDistances.begin();
    while ((TargetCount < NTargetConnections) && (CurDist != CurPostDistances.end())) { // [condition reconstructed]
      j = CurDist->second;
      if (!NonSelf || (i != j)) {
        TempDistance[i][TargetCount] = CurDist->first;
        TempPost[i][TargetCount] = j;
        ++TempNpre[j]; // count presynaptic targeting of the postsynaptic neuron
        ++TargetCount;
        ++ConnectionCount;
        if (CurDist->first > MaxConDistance) MaxConDistance = CurDist->first;
      }
      ++CurDist;
    }
    if (TargetCount > M) M = TargetCount;
  }
  for (j=0; j<nt; ++j)
    if (TempNpre[j] > maxN_pre) maxN_pre = TempNpre[j];
  Dout(dc::con, "M=" << M << " ns=" << ns << " maxN_pre=" << maxN_pre); fflush(stdout);
  Dout(dc::con, "ConnectionCount=" << ConnectionCount);
  // initialize dynamical arrays
  InitializeDynamicalArrays(N, M, maximumDelay);
  // copy temporary TempPost to post array
  for (i=0; i<ns; ++i) {
    /* [loop head lost; per synapse, distance-scaled delay:] */
      if (CurDelay >= maximumDelay) {
        CurDelay = maximumDelay-1;
        Dout(dc::con, "ERROR: delay too high");
      }
      delays[i][CurDelay][delays_length[i][CurDelay]] = j;
      ++delays_length[i][CurDelay];
  }
  Dout(dc::con, "connected"); fflush(stdout);
  SetupPresynapticInfo();
  Dout(dc::con, "Connection initialized");
}

int connection::ConnectCircularPre(float Connectivity, float MaxWeight, float maxDelay, float minDelay)
{
  Strength = MaxWeight;
  Dout(dc::con, "Connection::ConnectCircularPre"); // fixed: debug message said "ConnectProfile"
  nt = TargetLayer->N;
  int N;
  ns = N = SourceLayer->N;
  if (Connectivity > 1) Connectivity = 1;
  if (Connectivity < 0) Dout(dc::con, "connection::ConnectCircularPre: ERROR: Connectivity shouldn't be negative!!");
  int NSourceConnections = int(Connectivity*ns);
  Dout(dc::con, "NSourceConnections=" << NSourceConnections);
  int ConnectionCount = 0;
  if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers of equal size
  int i, j, k, exists, r;
  // Dout(dc::con, "connect");
  fflush(stdout);
  // double loop (ns, nt) goes through every possible synapse and checks whether to connect or not
  M = 0;
  Dout(dc::con, "MakeFirstTempArray, ns=" << ns << " nt=" << nt); fflush(stdout);
  // boost::multi_array<int,2> TempPost(boost::extents[ns][nt]);
  int** TempPost;
  NewArray2d(TempPost, ns, nt);
  Dout(dc::con, "MakeSecondTempArray"); fflush(stdout);
  boost::multi_array<float,2> TempDistance(boost::extents[ns][nt]);
  vector<int> TargetCount(ns); // by default vectors are initialized with zero
  vector<int> TempNpre(nt);
  float InitialWeights = 1;
  int CurDelay;
  float MaxConDistance = 0;
  SetMinMaxDelay(maxDelay, minDelay);
  maxN_pre = 0;
  float Distance;
  vector2d basis(1,1);
  SimpleTextProgressBar pgbar(ns);
  Dout(dc::con, "Getmaxdist"); fflush(stdout);
  float ConValue;
  // for each postsynaptic neuron calculate a sorted distance list over the
  // sources, then connect until NSourceConnections is reached
  // typedef map<float,int> DistMap;   [template args reconstructed]
  map<float,int> CurPostDistances;
  map<float,int>::iterator CurDist = CurPostDistances.begin();
  for (j=0; j<nt; ++j) {
    CurPostDistances.clear(); // [reconstructed]
    for (i=0; i<ns; ++i) {    // [loop head reconstructed]
      Distance = (TargetLayer->Pos[j] - SourceLayer->Pos[i]).abs();
      CurPostDistances[Distance+0.000001*i] = i; // add a minimal offset, because each key must be unique (map has no duplicate keys)
    }
    CurDist = CurPostDistances.begin();
    while ((TempNpre[j] < NSourceConnections) && (CurDist != CurPostDistances.end())) { // [condition reconstructed]
      i = CurDist->second;
      if (!NonSelf || (i != j)) {
        TempDistance[i][TargetCount[i]] = CurDist->first;
        TempPost[i][TargetCount[i]] = j;
        ++TempNpre[j]; // count presynaptic targeting of the postsynaptic neuron
        ++TargetCount[i];
        ++ConnectionCount;
        if (CurDist->first > MaxConDistance) MaxConDistance = CurDist->first;
      }
      ++CurDist;
    }
  }
  for (i=0; i<ns; ++i)
    if (TargetCount[i] > M) M = TargetCount[i];
  for (j=0; j<nt; ++j)
    if (TempNpre[j] > maxN_pre) maxN_pre = TempNpre[j];
  Dout(dc::con, "M=" << M << " ns=" << ns << " maxN_pre=" << maxN_pre); fflush(stdout);
  Dout(dc::con, "ConnectionCount=" << ConnectionCount);
  // initialize dynamical arrays
  InitializeDynamicalArrays(N, M, maximumDelay);
  // copy temporary TempPost to post array
  for (i=0; i<ns; ++i) {
    /* [loop head lost:] */
      if (CurDelay >= maximumDelay) {
        CurDelay = maximumDelay-1;
        Dout(dc::con, "ERROR: delay too high");
      }
      delays[i][CurDelay][delays_length[i][CurDelay]] = j;
      ++delays_length[i][CurDelay];
  }
  Dout(dc::con, "connected"); fflush(stdout);
  SetupPresynapticInfo();
  Dout(dc::con, "Connection initialized");
}

int connection::ConnectGaussianProb(float Sigma, float MaxWeight, float maxDelay, float minDelay,
                                    float MinConDistance, float MaxConnectivity, bool Cyclic)
{
  Strength = MaxWeight;
  Dout(dc::con, "Connection::ConnectGaussianProb");
  nt = TargetLayer->N;
  int ConnectionCount = 0;
  int N;
  ns = N = SourceLayer->N;
  if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers of equal size
  int i, j, k, exists, r;
  // Dout(dc::con, "connect");
  fflush(stdout);
  // double loop (ns, nt) goes through every possible synapse and checks whether to connect or not
  int TargetCount;
  M = 0;
  Dout(dc::con, "make temporary arrays"); fflush(stdout);
  boost::multi_array<int,2> TempPost(boost::extents[ns][nt]); // [template args reconstructed]
  Dout(dc::con, "ready temporary arrays"); fflush(stdout);
  vector<int> TempNpre(nt);
  /* [initialization loops and declarations (Distance, basis) partly lost] */
  for (i=0; i<ns; ++i) {
    TargetCount = 0;       // [reconstructed]
    for (j=0; j<nt; ++j) { // [loop heads reconstructed]
      if (Cyclic) {
        Distance = (TargetLayer->Pos[j]).CyclicDistance(SourceLayer->Pos[i], basis);
      } else {
        Distance = (TargetLayer->Pos[j] - SourceLayer->Pos[i]).abs();
      }
      if ((Distance >= MinConDistance) && (gsl_rng_uniform(gslr) < MaxConnectivity*gauss(Distance,Sigma))) {
        TempPost[i][TargetCount] = j;
        ++TempNpre[j]; // count presynaptic targeting of the postsynaptic neuron
        ++TargetCount;
        ++ConnectionCount;
      }
    }
    if (TargetCount > M) M = TargetCount;
  }
  for (j=0; j<nt; ++j)
    if (TempNpre[j] > maxN_pre) maxN_pre = TempNpre[j];
  // now M and maxN_pre are calculated
  Dout(dc::con, "M=" << M << " ns=" << ns << " maxN_pre=" << maxN_pre); fflush(stdout);
  Dout(dc::con, "ConnectionCount=" << ConnectionCount);
  // initialize dynamical arrays
  InitializeDynamicalArrays(N, M, maximumDelay);
  // copy temporary TempPost to post array
  for (i=0; i<ns; ++i) {
    /* [copy loop and the end of this function were lost in extraction] */
  }
}

/* [name and signature of the following all-to-all connect routine were lost
   in extraction] */
int connection::/*[name lost: all-to-all connect]*/(/* float MaxWeight, float maxDelay, float minDelay */)
{
  nt = TargetLayer->N;
  int ConnectionCount = 0;
  int N;
  ns = N = SourceLayer->N;
  if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers of equal size
  int i, j, k, exists, r;
  if (NonSelf) M = nt-1; else M = nt;
  maxN_pre = ns;
  SetMinMaxDelay(maxDelay, minDelay);
  // initialize dynamical arrays
  InitializeDynamicalArrays(N, M, maximumDelay);
  float InitialWeights = 1;
  int CurM;
  for (i=0; i<ns; ++i) {
    /* [wiring loop and function end lost in extraction] */
  }
}
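/* Worked example (added; values hypothetical) for ConnectGaussianProb above:
   each candidate synapse is accepted with probability
   p(d) = MaxConnectivity * gauss(d, Sigma), sampled via
   gsl_rng_uniform(gslr) < p(d). Assuming gauss() is the unnormalized kernel
   exp(-d*d/(2*Sigma*Sigma)), with MaxConnectivity = 0.3 and Sigma = 0.1:

     d = 0.0  ->  p = 0.3 * 1.00 = 0.30
     d = 0.1  ->  p = 0.3 * 0.61 = 0.18
     d = 0.2  ->  p = 0.3 * 0.14 = 0.04

   so the expected in-degree falls off with a Gaussian profile around each
   source neuron. */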
/* [name and signature of the following routine were lost in extraction; it
   connects layers row-wise on a (sdimx,sdimy) -> (tdimx,tdimy) grid and, with
   divergentrow set, weights targets with a Gaussian of the row distance] */
int connection::/*[name lost: row-wise Gaussian connect]*/(/* ..., int tdimx, int tdimy, int sdimx, int sdimy, bool divergentrow, float sigma_divrow, ... */)
{
  nt = TargetLayer->N;
  int ConnectionCount = 0;
  int N;
  ns = N = SourceLayer->N;
  if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers of equal size
  int x_s, y_s, x_t, y_t, i, j, k, exists, r;
  if (NonSelf) M = nt-1; else M = nt;
  maxN_pre = ns;
  int ntns;
  if (divergentrow == true) ntns = tdimy/sdimy; // fixed: was "divergentrow = true" (assignment instead of comparison)
  else ntns = 1;
  SetMinMaxDelay(maxDelay, minDelay);
  // initialize dynamical arrays
  InitializeDynamicalArrays(N, M, maximumDelay);
  float InitialWeights = 1;
  int CurM;
  CurM = 0;
  /* [the nested row/column loops are largely lost, including a commented-out
     variant over y_source; the surviving fragment of the weight assignment:] */
      // Distance = (TargetLayer->Pos[y_t].CyclicDistance(SourceLayer->Pos[y_s*sdimx], basis));
      else Distance = y_t - y_s*ntns;
      if (divergentrow == true) {
        s[here][CurM] = MaxWeight*exp(-0.5*(Distance)*(Distance)/(sigma_divrow*sigma_divrow));
      } // 1./(sigma_divrow*sqrt(2.*3.14))
      ++CurM;
  /* [end of surviving fragment] */
  Dout(dc::con, "M=" << M << " ns=" << ns << " maxN_pre=" << maxN_pre); fflush(stdout);
  Dout(dc::con, "connected"); fflush(stdout);
  SetupRandomDelays();
  SetupPresynapticInfo();
  Dout(dc::con, "Connection initialized");
}

int connection::ConnectGradientFields(float MaxWeight, float xslope, float yslope,
                                      int tdimx, int tdimy, int sdimx, int sdimy,
                                      float maxDelay, float minDelay)
{
  Strength = MaxWeight;
  Dout(dc::con, "Connection::ConnectGradientField");
  nt = TargetLayer->N;
  int ConnectionCount = 0;
  int N;
  ns = N = SourceLayer->N;
  if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers of equal size
  int x_s, y_s, x_t, y_t, i, j, k, exists, r;
  if (NonSelf) M = nt-1; else M = nt;
  maxN_pre = ns;
  SetMinMaxDelay(maxDelay, minDelay);
  // initialize dynamical arrays
  InitializeDynamicalArrays(N, M, maximumDelay);
  float InitialWeights = 1;
  int CurM;
  CurM = 0;
  float Distance;
  vector2d basis(1,1);
  for (x_s=0; x_s<sdimx; ++x_s) {
    /* [the nested gradient loops and the end of this function were lost in extraction] */
  }
}

int connection::SetMinMaxDelay(float maxDelay, float minDelay) // [head reconstructed; it converts the arguments into delay steps]
{
  /* [conversion and part of the range check lost:] */
  if (maximumDelay >= Dmax) { // [condition reconstructed]
    cout << "connection::SetMinMaxDelay: Initialization Parameter Error:"
         << " maximumDelay > Dmax=" << Dmax << " \n";
    maximumDelay = Dmax-1;
  }
  if (minimumDelay > maximumDelay) {
    cout << "connection::SetMinMaxDelay: Initialization Parameter Error:"
         << " minimumDelay > maximumDelay\n";
    minimumDelay = 0;
  }
  DelayDiff = maximumDelay-minimumDelay-1;
  cout << "MaximumDelay=" << maximumDelay << " MinimumDelay=" << minimumDelay
       << " DelayDiff= " << DelayDiff << "\n";
  return 0;
}

int connection::ConnectSelf(float MaxWeight, float maxDelay, float minDelay)
{
  Strength = MaxWeight;
  Dout(dc::con, "Connection::ConnectSelf");
  nt = TargetLayer->N;
  int ConnectionCount = 0;
  int N;
  ns = N = SourceLayer->N;
  int i, j, k, exists, r;
  M = 1;
  maxN_pre = 1;
  SetMinMaxDelay(maxDelay, minDelay);
  // initialize dynamical arrays
  // InitializeDynamicalArrays(N, M, Dmax);
  InitializeDynamicalArrays(N, M, maximumDelay);
  int CurDelay;
  float InitialWeights = 1;
  int MaxCon = min(ns,nt);
  int rnum;
  Dout(dc::con, "MaxCon=" << MaxCon);
  for (i=0; i<MaxCon; ++i) {
    /* [one-to-one wiring loop and function end lost in extraction] */
  }
}

/* [name and signature of the following routine were lost; it connects each
   source neuron to the first maxTarget target neurons] */
int connection::/*[name lost]*/(/* ..., int maxTarget, float maxDelay, float minDelay */)
{
  nt = TargetLayer->N;
  if (maxTarget > nt) {
    maxTarget = nt;
    Dout(dc::con, "ERROR: maxTarget > TargetLayer->N");
  }
  int ConnectionCount = 0;
  int N;
  ns = N = SourceLayer->N;
  if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers of equal size
  int i, j, k, exists, r;
  M = maxTarget;
  maxN_pre = ns;
  // initialize dynamical arrays
  SetMinMaxDelay(maxDelay, minDelay);
  InitializeDynamicalArrays(N, M, maximumDelay);
  float InitialWeights = 1;
  for (i=0; i<ns; ++i) {
    /* [wiring loop and function end lost in extraction] */
  }
}

/* [name of the following weight-setter was lost] */
int connection::/*[name lost]*/(const vector<vector<float> >& s_new)
{
  if (s_new.size() == ns && s_new[0].size() == M) {
    for (int i=0; i<ns; ++i) {
      /* [copy loop lost; it copies s_new into the weight matrix s] */
    }
  }
  return 0;
}

/* [head lost; the name SetupRandomDelays is assumed from the call above, since
   this routine draws a uniform random delay for every existing synapse] */
int connection::SetupRandomDelays()
{
  int CurDelay; // [reconstructed]
  for (int i=0; i<ns; ++i) {   // [loop heads reconstructed]
    for (int j=0; j<M; ++j) {
      if (s[i][j] >= 0) {
        CurDelay = minimumDelay + getrandom(DelayDiff+1);
        delays[i][CurDelay][delays_length[i][CurDelay]] = j;
        ++delays_length[i][CurDelay];
      }
    }
  }
  return 0;
}

int connection::SetupDelays(float maxDelay, float minDelay)
{
  SetMinMaxDelay(maxDelay, minDelay);
  SetupDelays();
  return 0;
}

int connection::SetupDelays()
{
  // setup delays
  // connections must already be set up!!! ToDo: check this!!
  // ToDo: delays are set up systematically (ordered); this works only if the
  //       weights are chosen randomly!! --> SetupDelaysRandom() needed!!
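  /* Worked example (added; values hypothetical): with M=100 synapses per
     source neuron, minimumDelay=0 and maximumDelay=20, DelayDiff is
     20-0-1 = 19 and MeanDelays_Length = M/DelayDiff = 5, so the loop below
     assigns the first 5 synapses delay 0, the next 5 delay 1, and so on; the
     last delay bin takes whatever remainder is left. Because the assignment
     is ordered by synapse index, it is only unbiased if the weights
     themselves were drawn randomly, as the ToDo above notes. */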
  int i, j, k;
  int MeanDelays_Length = M/DelayDiff;
  cout << "MaximumDelay=" << maximumDelay << " MinimumDelay=" << minimumDelay
       << " MeanDelays_length= " << MeanDelays_Length << " DelayDiff= " << DelayDiff << "\n";
  for (i=0; i<ns; ++i) {
    /* [distribution loop partly lost; ind tracks how many synapses of neuron i
       have already been assigned. The surviving core:] */
      if (j >= minimumDelay) {
        if ((M-ind) > MeanDelays_Length) delays_length[i][j] = MeanDelays_Length;
        else delays_length[i][j] = M-ind;
      } else {
        delays_length[i][j] = 0;
      }
      for (k=0; k</* [bound lost] */;) { /* [delay table fill lost] */ }
  }
}

/* [name and head of the following routine were lost; it is an Izhikevich-style
   random connect in which M must be a multiple of the maximum delay so that
   delays can be laid out in equal blocks] */
int connection::/*[name lost]*/(float c, float InitialWeights /*, ... */)
{
  nt = TargetLayer->N;
  // float TAULEARN_pre=50;  dec_pre = exp(-1./TAULEARN_pre);
  // float TAULEARN_post=5;  dec_pre = exp(-1./TAULEARN_post);
  int N;
  ns = N = SourceLayer->N;
  if (ns != nt) NonSelf = false; // NonSelf makes sense only for layers of equal size
  M = (int)floor(c*ns);
  maxN_pre = 3*ns*M/nt;
  Dout(dc::con, "M=" << M << " c=" << c << " ns=" << ns << " maxN_pre=" << maxN_pre); fflush(stdout);
  minimumDelay = 0;
  maximumDelay = Dmax;
  // SetMinMaxDelay(maxDelay, minDelay);
  if (M > nt) Dout(dc::con, "ERROR: too many connections, reduce connectivity!!");
  if (M % maximumDelay != 0) {
    Dout(dc::con, "ERROR: bad connectivity value!! M%D= " << M%maximumDelay << " !=0");
    // M += 20 - (M%Dmax);
    M -= M % maximumDelay;
    Dout(dc::con, "reducing weights to M= " << M);
    // M must be a multiple of Dmax for this random weight initialization
  }
  // initialize dynamical arrays
  InitializeDynamicalArrays(N, M, maximumDelay);
  int i, j, k, exists, r;
  for (i=0; i<ns; ++i) {
    /* [wiring loop and function end lost in extraction] */
  }
}

/* [head lost; this is the old presynaptic-info setup, ending in a call to
   CheckPresynapticInfo_old(). For every synapse j->i it records the delay and
   a pointer to the weight:] */
      if (N_pre[i] > maxN_pre)
        cout << "programming error: N_pre[i]=" << N_pre[i] << " too high (higher than maxN_pre="
             << maxN_pre << "); j=" << j << " i=" << i << "\n";
      D_pre[i][N_pre[i]] = dd;              // add the delay
      s_pre[i][N_pre[i]] = &s[j][ConIndex]; // pointer to the synaptic weight
      // sd_pre[i][N_pre[i]] = &sd[j][ConIndex]; // pointer to the derivative
      ++N_pre[i];
  /* [end of surviving fragment] */
  CheckPresynapticInfo_old();
}

int connection::CheckPresynapticInfo_old()
{
  // 1. total number of connections: sum of delays_length
  //    should be equal to: sum of N_pre
  Dout(dc::con, "Checking Presynaptic Info");
  int i, j, k;
  int countPre, countPost;
  countPre = countPost = 0;
  /* [the counting loops of this check and the head of SetupPresynapticInfo
     were lost in extraction; SetupPresynapticInfo allocates and fills
     N_pre/I_pre/D_pre/s_pre/m_pre with the same per-synapse core as above:] */
      if (N_pre[i] > maxN_pre)
        cout << "programming error: N_pre[i]=" << N_pre[i] << " too high (higher than maxN_pre="
             << maxN_pre << "); j=" << j << " i=" << i << "\n";
      D_pre[i][N_pre[i]] = dd;              // add the delay
      s_pre[i][N_pre[i]] = &s[j][ConIndex]; // pointer to the synaptic weight
      // sd_pre[i][N_pre[i]] = &sd[j][ConIndex]; // pointer to the derivative
      ++N_pre[i];
  /* [end of surviving fragment] */
  CheckPresynapticInfo();
}

int connection::DeletePresynapticInfo()
{
  if (N_pre != 0) { delete[] N_pre; N_pre = 0; }
  if (I_pre != 0) { DeleteArray2d(I_pre, nt); I_pre = 0; }
  if (D_pre != 0) { DeleteArray2d(D_pre, nt); D_pre = 0; }
  if (s_pre != 0) { DeleteArray2d(s_pre, nt); s_pre = 0; }
  if (sd_pre != 0) { DeleteArray2d(sd_pre, nt); sd_pre = 0; }
  return 0;
}

int connection::Print()
{
  for (int SourceNr=0; SourceNr<ns; ++SourceNr) {
    /* [print loop lost in extraction] */
  }
}
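/* Note (added): the presynaptic tables built above invert the forward
   post/s/delays arrays. For a target neuron i, N_pre[i] synapses arrive, with
   D_pre[i][n] the delay and s_pre[i][n] a pointer into the weight matrix. A
   minimal sketch of iterating them (hypothetical helper, not part of the
   original file):

     float IncomingWeightSum(const ConnectionInfo& info, int i) {
       float sum = 0;
       for (int n=0; n<info.N_pre[i]; ++n)
         sum += *info.s_pre[i][n]; // dereference the stored weight pointer
       return sum;
     }

   CheckPresynapticInfo (below) verifies exactly this pointer consistency
   against the forward arrays. */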
int connection::CheckPresynapticInfo() // [head reconstructed; loop heads lost]
{
  /* [the loops run over every source neuron i, delay j and synapse index
     SourceSynNumber, look up the target CurTarget and its PostIndex, and
     verify the entries; the surviving core:] */
      // Dout(dc::con, "S=" << i << "-> T=" << CurTarget << " PostIndex=" << PostIndex
      //      << " m=" << SourceSynNumber << " N_pre[T]=" << N_pre[CurTarget]); fflush(stdout);
      if (PostIndex == N_pre[CurTarget]) {
        cerr << "ERROR!!!!!!!!!! PresynapticInfo not correct!!!!!!\n";
        cerr << "Postindex = " << PostIndex << " N_pre[CurTarget] = " << N_pre[CurTarget] << "\n";
      } else {
        if ((s_pre[CurTarget][PostIndex] == &s[i][SourceSynNumber])
            && (D_pre[CurTarget][PostIndex] == j)
            && (m_pre[CurTarget][PostIndex] == SourceSynNumber)) {
          // Dout(dc::con, "correct");
        } else {
          cerr << "ERROR \n";
          cerr << "SourceNr=" << i << " SourceSynNr=" << SourceSynNumber << " TargetNr=" << CurTarget << "\n";
          cerr << "s=s: " << (s_pre[CurTarget][PostIndex] == &s[i][SourceSynNumber])
               << " Delay=" << j << " D_pre[" << /* [rest of message lost] */;
        }
      }
  /* [end of surviving fragment] */
}

int connection::proceede(int TotalTime)
{
  // [head reconstructed after the pattern of DepressiveConnection::proceede below]
  int t = TotalTime % MacroTimeStep;
  int i, j, k, mi, ipre;
  // calculate input for target layer
  k = SourceLayer->N_firings;
  // cpu_start = clock();
  if (rec) rec->record(dt*TotalTime, s[Observe_s][Observe_m]);
  if (BinRec) BinRec->record();
  // only spikes that lie no more than maximumDelay in the past are considered.
  // The firings array holds firings[SpikeNr][0]: spike time and
  // firings[SpikeNr][1]: neuron number  [translated from German]
  while (t-(SourceLayer->firings[--k][0]) < maximumDelay)
  {
    ipre = SourceLayer->firings[k][1];
    for (j=0; j < delays_length[ipre][t-SourceLayer->firings[k][0]]; j++) {
      mi = delays[ipre][t-SourceLayer->firings[k][0]][j];
      i = post[ipre][mi];
      InputPointer[i] += s[ipre][mi];
      // ToDo: check whether the code above (new) does the same as the code below (old); check performance!!
      // i=post[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
      // InputPointer[i]+=s[SourceLayer->firings[k][1]][delays[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]][j]];
    }
  }
  // cpu_end = clock();
  // cpu_time_used += ((double)(cpu_end - cpu_start)) / CLOCKS_PER_SEC;
  if (learn == true) {
    learnobj->proceede(TotalTime);
  }
}

int connection::prepare(int step)
{
  SimElement::prepare(step);
  if ((learn == true) && (learnobj != 0)) {
    learnobj->prepare();
    if (RewireOn) {
      Rewire(RewireThreshold, RewireMaxConnectivity);
    }
    if (AutoSave) Save();
  }
  // Dout(dc::con, "PERFORMANCE: cpu-time = " << cpu_time_used << " sec");
  return 0;
}

void connection::SetName(const char* _name)
{
  SimElement::SetName(_name);
  WeightFileName = Name + "weights.dat";
  Dout(dc::con, "WeightFileName = " << WeightFileName);
}

int connection::SetFileName(char* FileName)
{
  WeightFileName = FileName;
  Dout(dc::con, "Set WeightFileName to : " << WeightFileName);
  return 0;
}

int connection::Save()
{
  Save(WeightFileName.c_str());
  return 0;
}

int connection::Save(int nr)
{
  Save((WeightFileName + stringify(nr)).c_str());
  return 0;
}

int connection::Save(const string& SaveWeightFileName)
{
  int i, j, k;
  Dout(dc::con, " Save Con.file: " << SaveWeightFileName); fflush(stdout);
  FILE *fw;
  // save weights
  fw = fopen((DataDirectory+SaveWeightFileName).c_str(), "w");
  fwrite(&ns, sizeof(ns), 1, fw);
  fwrite(&(SourceLayer->Nx), sizeof(SourceLayer->Nx), 1, fw);
  fwrite(&(SourceLayer->Ny), sizeof(SourceLayer->Ny), 1, fw);
  fwrite(&nt, sizeof(nt), 1, fw);
  fwrite(&(TargetLayer->Nx), sizeof(TargetLayer->Nx), 1, fw);
  fwrite(&(TargetLayer->Ny), sizeof(TargetLayer->Ny), 1, fw);
  fwrite(&M, sizeof(M), 1, fw);
  fwrite(&maximumDelay, sizeof(maximumDelay), 1, fw);
  fwrite(&maxN_pre, sizeof(maxN_pre), 1, fw);
  // NewArray2d(delays_length,N,Dmax); // distribution of delays
  // NewArray3d(delays,N,Dmax,M);      // arrangement of delays
  for (i=0; i<ns; ++i) {
    /* [per-synapse write loops and the end of Save were lost in extraction,
       together with the head of a connection-level CheckHeaderConsistency
       whose surviving tail adds:] */
  }
}

/* [tail of connection::CheckHeaderConsistency] */
  if (maxN_pre > ns) {
    cerr << "ERROR: maxN_pre>ns \n";
    consistent = false;
  }
}

int connection::Load()
{
  Load(WeightFileName.c_str());
  return 0;
}

int connection::Load(const char* FileName)
{
  Load(FileName, DataDirectory.c_str());
  return 0;
}
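/* Weight file format (added; reconstructed from the fwrite/fread sequences in
   Save above and Load below). All header fields are written with their
   in-memory sizes, so the files are not portable across architectures:

     ns, SourceNx, SourceNy    // source layer size and geometry
     nt, TargetNx, TargetNy    // target layer size and geometry
     M, maximumDelay, maxN_pre // synapses per source neuron, delay bound, max in-degree
     [per-synapse data follow; that part of Save/Load was lost in extraction]
*/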
"DirAndFileName=" << DirAndFileName << ""); fflush(stdout); const char* DFileName = DirAndFileName.c_str(); if (!fexist(DFileName)) { cerr << "\n\nERROR: connection file " << DFileName << " doesn't exist \n\n"; fflush(stderr); return(2); } else { Dout(dc::con, "\nLoadWeightFile: " << DFileName << ""); fflush(stdout); fw = fopen( (std::string(DirName)+FileName).c_str(), "r"); fread(&ns, sizeof(ns), 1, fw); fread(&SourceNx, sizeof(SourceNx), 1, fw); fread(&SourceNy, sizeof(SourceNy), 1, fw); fread(&nt, sizeof(nt), 1, fw); fread(&TargetNx, sizeof(TargetNx), 1, fw); fread(&TargetNy, sizeof(TargetNy), 1, fw); fread(&M, sizeof(M), 1, fw); fread(&maximumDelay, sizeof(maximumDelay), 1, fw); fread(&maxN_pre, sizeof(maxN_pre), 1, fw); cout << "ns=" << ns << "\n" << "nt=" << nt << "\n" << "SourceNx=" << SourceNx << "\n" << "SourceNy=" << SourceNy << "\n" << "TargetNx=" << TargetNx << "\n" << "TargetNy=" << TargetNy << "\n" << "M=" << M << "\n" << "maxN_pre=" << maxN_pre << "\n" << "maximumDelay=" << maximumDelay <<"\n" << "Dmax=" << Dmax << "\n"; fflush(stdout); bool SuccessfullyLoaded=CheckHeaderConsistency(); if (!SuccessfullyLoaded) { cerr << "\n\nERROR while trying to load " << FileName <<"\n"; cerr << "try option --NoLoadWeights \n\n"; fflush(stderr); return(1); } if (post == 0 && (s ==0) && (sd==0) && (delays_length==0) && (delays==0)) { // initialize dynamical arrays InitializeDynamicalArrays(ns, M, maximumDelay); // NewArray2d(post, ns, M); // indeces of postsynaptic neurons // NewArray2d(s, ns,M); NewArray2d(sd,ns,M); // NewArray2d(delays_length,ns,Dmax); // distribution of delays // NewArray3d(delays,ns,Dmax,M); // arrangement of delays for (i=0;i= ns) return -1; // find Weight in delays array for (int delay=0;delay 0.00001) { ++errors; cerr << "ERROR nr. 
" << errors << "\n"; fflush(stderr); } } } Dout(dc::con, "Checked Systematic Weights, errors=" << errors << ""); } // 2007/12/11: routine tested with condelins.cpp, seems to work int connection::DeleteWeightCorrectPreInfo(int SourceNr, int ConnectionNr, int SupposedDelay) { bool found = false; int WeightDelay=-1; int m=-1; if (SourceNr >= ns) return -1; // find Weight in delays array for (int delay=0;delay= nt) { cerr << "Error in InsertNewWeight: TargetNr too high\n"; return -1; } if (delay >= maximumDelay) { cerr << "ERROR in InsertNewWeight: delay >= maximumDelay\n"; return -1; } // find free position in post array int m=-1; for (int i=0;i SetNewWeights2 int connection::SetNewWeights(int NNewTargets, int NMaxTargets) { int i; int AvailableTargetNeurons [nt]; for (int SourceNr=0;SourceNr= NMaxTargets) { Dout(dc::con, "N_post[SourceNr] =" << N_post[SourceNr] << ">= NMaxTargets=" << NMaxTargets << ""); } else { for (i=0;i0) { int TargetIndex = gsl_rng_uniform_int(gslr, NAvailableTargetNeurons); int count=-1; TargetNr=-1; for (i=0;i AvailableSourceList; for (int SourceNr=0;SourceNr= NMaxTargets) { // Dout(dc::con, "N_post[SourceNr] =" << N_post[SourceNr] << ">= NMaxTargets=" << NMaxTargets << ""); // } else { // for (i=0;i0) { // int TargetIndex = gsl_rng_uniform_int(gslr, NAvailableTargetNeurons); // int count=-1; // TargetNr=-1; // for (i=0;i& SynList, int& MaxDelay) { // Dout(dc::con, "connection::GetSourceWeights"); SynList.clear(); float MaxWeight=0; MaxDelay=0; for(int CurDelay=0;CurDelayMaxWeight) { MaxWeight=CurWeight; } if (CurDelay>MaxDelay) { MaxDelay=CurDelay; } } } // Dout(dc::con, "MaxWeight=" << MaxWeight << ""); return MaxWeight; } /** return a vector of all synapses to target neuron * * @param [IN] index of target neuron * @param [OUT] output vector containing all synapses to target neuron nr CurTarget * @return maximum weight */ float connection::GetTargetWeights(int CurTarget, vector& SynList, int &MaxDelay) { // Dout(dc::con, "connection::GetTargetWeights"); SynList.clear(); float MaxWeight=0; MaxDelay=0; for (int j=0;jMaxWeight) { MaxWeight=CurWeight; } if (CurDelay>MaxDelay) { MaxDelay=CurDelay; } } // Dout(dc::con, "MaxWeight=" << MaxWeight << ""); return MaxWeight; } /** return maximum synaptic weight value * * @return maximum weight */ float connection::GetMaxWeight() { float MaxWeight=0; for (int CurSource=0;CurSourceMaxWeight) { MaxWeight=CurWeight; } } } } // Dout(dc::con, "MaxWeight=" << MaxWeight << ""); return MaxWeight; } int connection::GetMaxDelay() { int MaxDelay=0; for (int CurSource=0;CurSource0) { if (CurDelay>MaxDelay) { MaxDelay=CurDelay; } } } } return MaxDelay; } //////////////////////// DepressiveConnection::DepressiveConnection( layer* SL, layer* TL, csimInputChannel InputNumber, float _TauRec, float _U_se) : connection(SL, TL, InputNumber), // efficacy(boost::extents[SourceLayer->N][M]), // LastEpsp(boost::extents[SourceLayer->N][M]), U_SE(_U_se), TauRec(_TauRec/dt), U_se_fac(1-_U_se) { Dout(dc::con, "Initialize DepressiveConnection"); } DepressiveConnection::~DepressiveConnection() { DeleteArray2d(efficacy, SourceLayer->N); DeleteArray2d(LastEpsp, SourceLayer->N); } int DepressiveConnection::WriteSimInfo(fstream &fw) { stringstream sstr; sstr << " \n"; sstr << " \n"; connection::WriteSimInfo(fw, sstr.str()); } int DepressiveConnection::proceede(int TotalTime) { int t = TotalTime % MacroTimeStep; int i,j,k,mi,ipre; // calculate input for target layer k=SourceLayer->N_firings; // while (t-SourceLayer->firings[--k][0] < Dmax) while 
(t-SourceLayer->firings[--k][0] < maximumDelay) { ipre = SourceLayer->firings[k][1]; for (j=0; j< delays_length[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]]; j++) { mi = delays[ipre][t-SourceLayer->firings[k][0]][j]; i=post[ipre][mi]; if (TauRec > 0) { efficacy[ipre][mi] = (1-(1-efficacy[ipre][mi])*exp(-(t-LastEpsp[ipre][mi])/TauRec)); } else { efficacy[ipre][mi] =1; } InputPointer[i]+= s[ipre][mi] * efficacy[ipre][mi] ; efficacy[ipre][mi] *= U_se_fac; LastEpsp[ipre][mi] = t; } } if (learn == true) { learnobj->proceede(TotalTime); } } int DepressiveConnection::reset(int t) { for(int i=0;iprepare(); // Save(); // } connection::prepare(step); // Dout(dc::con, "PERFORMANCE: cpu-time = " << cpu_time_used << " sec"); int i,j; for (i=0;iN;++i) for (j=0;jN, M); NewArray2d(LastEpsp, SourceLayer->N, M); for (i=0;iN;++i) for (j=0;jN); } int FacilitativeConnection::InitializeDynamicalArrays( const int _N, const int _M, const int Dmax) { cout <<"FacilitativeConnection::InitializeDynamicalArrays\n"; DepressiveConnection::InitializeDynamicalArrays(_N,_M,Dmax); int i,j; NewArray2d(U_SEvalue, SourceLayer->N, M); for (i=0;iN;++i) for (j=0;jN_firings; // while (t-SourceLayer->firings[--k][0] < Dmax) float CurU_SE; while (t-SourceLayer->firings[--k][0] < maximumDelay) { ipre = SourceLayer->firings[k][1]; for (j=0; j< delays_length[SourceLayer->firings[k][1]][t-SourceLayer->firings[k][0]]; j++) { mi = delays[ipre][t-SourceLayer->firings[k][0]][j]; i=post[ipre][mi]; U_SEvalue[ipre][mi] *= exp(-(t-LastEpsp[ipre][mi])/UseTauDec); CurU_SE = UseConst + U_SEvalue[ipre][mi]; efficacy[ipre][mi] = (1-(1-efficacy[ipre][mi])*exp(-float(t-LastEpsp[ipre][mi])/TauRec)); // Dout(dc::con, efficacy[ipre][mi] << " "); //remove InputPointer[i]+= s[ipre][mi] * efficacy[ipre][mi] * CurU_SE; efficacy[ipre][mi] *= (1-CurU_SE); LastEpsp[ipre][mi] = t; U_SEvalue[ipre][mi] += (1-CurU_SE)*UseInc; } } if (BinRec) BinRec->record(); if (learn == true) { learnobj->proceede(TotalTime); } } int FacilitativeConnection::StartBinRec(int PreSynNumber) { int NumObserve = 2*M; float** Buffer = new float* [NumObserve]; for (int i=0;iN, PspDuration); } PspConnection::~PspConnection() { delete[] PspTemplate; DeleteArray2d(Psp,TargetLayer->N); }