
Upload files to ''

Oscar Portoles 2 years ago
parent
commit
726d09c004

+ 132 - 0
ROIrelevance.py

@@ -0,0 +1,132 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Feb 23 09:51:31 2021
+
+@author: Oscar Portoles
+"""
+
+import numpy as np
+import sys
+import visArchis_seq as v
+# import optimProbs as oudp
+import modelStages as udp
+import copy
+
+nSample = 1000
+topNbest = 10000  # approximately the best 1% of solutions
+
+d = v.LoadData()
+d.nameModel = 'stgSequence_Lopt_allPu1500_wE_P040_optIni_v5_fixG0p15L0p6_Stg'
+d.nameEnding =''
+d.stages = [1,2,3,4] # to read file names
+saveTo = './dataSave/relevanceOfPulses_P030_G0p15L0p6.npz'
+
+# get data
+pulse   = d.getPulses()
+fit     = d.getFitness()
+m       = d.getBestModel()
+# Merge all islands
+allPulse, allCost, allsCor, allRenv, allFC = [None]*pulse.nStg, [None]*pulse.nStg, [None]*pulse.nStg, [None]*pulse.nStg, [None]*pulse.nStg
+for sIx in range(pulse.nStg):
+    mergeP_, mergeC_, mergeScor_, mergeRenv_ = [], [], [], [] 
+    for iIx in range(pulse.nIsla):
+        mergeP_.append(pulse.d[iIx][sIx])
+        mergeC_.append(fit.cost[iIx][sIx])
+        mergeScor_.append(fit.sCor[iIx][sIx].ravel())
+        mergeRenv_.append(np.squeeze(fit.renv[iIx][sIx]))
+    allPulse[sIx] = np.vstack(mergeP_)
+    allCost[sIx] = np.hstack(mergeC_)
+    allsCor[sIx] = np.hstack(mergeScor_)
+    allRenv[sIx] = np.vstack(mergeRenv_)
+
+# Keep top N best solutions    
+pulseTh, costTh, fcTh, sCorTh, rEnvTh = [None]*4, [None]*4, [None]*4, [None]*4, [None]*4
+minCostTh = np.zeros(4)
+for i in range(4):
+    pulseTh[i]  = allPulse[i][ np.argsort(allCost[i])[-topNbest:], :]
+    costTh[i]   = allCost[i][  np.argsort(allCost[i])[-topNbest:]]
+    sCorTh[i]   = allsCor[i][  np.argsort(allCost[i])[-topNbest:]]
+    rEnvTh[i]   = allRenv[i][  np.argsort(allCost[i])[-topNbest:], :]
+    minCostTh[i]= np.min(costTh[i])
+iXpick   = np.random.choice(np.arange(topNbest), size=nSample, replace=False)
+  
+del pulse, fit, allPulse, allCost, allRenv, allsCor
+
+# best sequence of pulses and its scores
+pulseBest = np.zeros((m.nroi,m.nStg))
+fitBest   = np.zeros(m.nStg)
+sCorBest  = np.zeros(m.nStg)
+rEnvBest  = np.zeros((m.nStg,68))
+for i in range(m.nStg):
+    pulseBest[:,i]  = m.logs[i][0][0]
+    fitBest[i]      = m.logs[i][0][3]
+    sCorBest[i]     = m.logs[i][0][6] 
+    rEnvBest[i,:]   = np.squeeze(m.logs[i][0][9])
+
+fitRef  = np.zeros((nSample,4))
+sCorRef = np.zeros((nSample,4))
+rEnvRef = np.zeros((nSample,4,68))
+pliRef  = np.zeros((nSample,2278,4))
+fitTe   = np.zeros((nSample,68,4))
+sCorTe  = np.zeros((nSample,68,4))
+rEnvTe  = np.zeros((nSample,68,4,68))
+pulseTe = np.zeros((nSample,68,4))
+
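+# Relevance test: for every sampled top-ranked pulse and every stage, run the model once
+# with the full pulse (reference run), then 68 more times with the pulse of one ROI set
+# to zero, so the drop in fitness / correlation quantifies that region's relevance.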
+for iXt in range(nSample):
+    for iXs in range(4):
+        pulseRand = pulseTh[iXs][iXpick[iXt],:] # pulse to be tested
+        pulseTe[iXt,:,iXs] = pulseRand
+        stages = list(range(iXs+1))
+        if len(stages) == 1:
+            kao = udp.KAOmodel(stageIx=stages[0])
+        else:
+            kao = udp.KAOmodel(stageIx=stages)
+        kao._setHistDelayAndContainers()
+        kao._edgesImpulse()  
+        kao._getEmpiReativEnv()
+        kao.doNoise(variance=0)
+        # do pulses
+        pulseTest = copy.deepcopy(pulseBest[:,stages])
+        pulseTest[:,iXs] = pulseRand
+        kao.pulse = pulseTest
+        
+        kao.runModel_Pulsein_sequence() # noise is zeros
+        
+        fit_ = kao.similarityAllPLIs()
+        fitRef[iXt, iXs] = fit_[iXs]
+        pliRef[iXt, :, iXs] = kao.pli[iXs,:]
+        
+        kao.relativeEnvBuPre()
+        kao.getScorrelRelEnv()
+        sCorRef[iXt, iXs] = kao.sCor[iXs]   
+        rEnvRef[iXt, iXs,:] = kao.relEnv[:,iXs]  
+        # iterate region to get relevance with a missing pulse
+        for iXroi in range(68):
+            if len(stages) == 1:
+                kao = udp.KAOmodel(stageIx=stages[0])
+            else:
+                kao = udp.KAOmodel(stageIx=stages)            
+            kao._setHistDelayAndContainers()
+            kao._edgesImpulse()  
+            kao._getEmpiReativEnv()
+            kao.doNoise(variance=0)
+            # do pulses
+            pulseTest = copy.deepcopy(pulseBest[:,stages])
+            pulseTest[:,iXs] = pulseRand
+            pulseTest[iXroi,iXs] = 0
+            kao.pulse = pulseTest
+            
+            kao.runModel_Pulsein_noise()
+    
+            fit_ = kao.similarityAllPLIs()
+            fitTe[iXt, iXroi, iXs] = fit_[iXs]
+            
+            kao.relativeEnvBuPre()
+            kao.getScorrelRelEnv()
+            sCorTe[iXt, iXroi, iXs]   = kao.sCor[iXs]
+            rEnvTe[iXt, iXroi, iXs,:] = kao.relEnv[:,iXs]
+            
+np.savez(saveTo, rEnvTe=rEnvTe, sCorTe=sCorTe, fitTe=fitTe, pulseTe=pulseTe, pulseTh=pulseTh,costTh=costTh,iXpick=iXpick,
+         fitRef=fitRef, sCorRef=sCorRef, pliRef=pliRef, rEnvRef=rEnvRef, sCorTh=sCorTh, rEnvTh=rEnvTh,
+         pulseBest=pulseBest, fitBest=fitBest, sCorBest=sCorBest, rEnvBest=rEnvBest)

+ 77 - 0
acrossTrialFCandEnvelopeFig.m

@@ -0,0 +1,77 @@
+% PLI across trials and relative change in envelope amplitude time-locked to the bumps, for paper figure
+%addpath('/home/p277634/Documents/MATLAB/eeglab14_1_2b/')
+
+clear all
+close all
+clc
+
+bln     = 40;
+nroi    = 68;
+win = [100,100]; % time window to the left and right of the stage onset, in samples
+
+dataPath = './data';
+dataFile = '/hmmBu4RroiAveMap3StagBL.mat';
+
+load('./data/mapFS2DTI.mat')
+load('./data/FCstages_R.mat', 'fcTh')
+fcTh = logical(fcTh ~= 0 );
+
+load([dataPath dataFile],'proBump', 'x','y','cond', 'subj')
+load('./data/thetaROIs_R.mat', 'roiaveR',  'yR','xR')
+% get bump locations
+[mOnset, onset, stages, mStage] = getDurations(proBump,x,y); % proBump cell(3,3)
+duraByCond = cell(4,1);
+for i = 1:4
+    duraByCond{i} = stages(cond==i,:);
+end
+info = {'from: saveAcrossTrialsPLI4paper.m, stage duration by conditions'};
+
+%save('/dataSave/stageDurationByCondition.mat','duraByCond','info')
+
+onsetN = round(onset/10+bln);
+lens = y - x + 1;
+lensR = yR - xR + 1;
+nroi = 68;
+nTr  = length(xR);
+nSj  = max(subj);
+nStg = size(onsetN,2)-1;
+stageIXs = [2,3,4,5];
+pairs = combnk([1:nroi],2);
+
+enveZS = zeros(sum(lensR),nroi,'single');
+
+phase = zeros(sum(lensR),nroi,'single');
+
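+% per trial: pad and Hilbert-transform the ROI signals, keep the instantaneous phase,
+% and z-score the envelope amplitude using the unpadded part of the trial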
+for tr = 1:nTr
+    data_   = padHilbert(roiaveR(xR(tr):yR(tr),:),40);
+    phase(xR(tr):yR(tr),:)   = angle(data_);
+    % envelope
+    envelope = abs(data_);
+    envMean = mean(envelope(40+1:end-40,:),1);
+    envSTD = std(envelope(40+1:end-40,:),[],1);
+    enveZS(xR(tr):yR(tr),:) = (envelope - envMean) ./ envSTD;
+end
+%% FC, PLI
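+% sign of the imaginary part of the cross-spectrum between each ROI pair (+1 leading,
+% -1 lagging); averaging these signs across trials around each stage onset gives the
+% across-trial PLI computed below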
+phZ = exp(1i* single(phase) );
+phFC = sign( imag( phZ(:, pairs(:,1)) .* conj(phZ(:, pairs(:,2))) )) ;
+
+
+[pliW, pliWsd]=getERP_SUBJwindow(phFC,stageIXs,win, onsetN, xR, yR,subj);
+
+vis1ERPFCwindows(pliW, fcTh(stageIXs+1,:), win(1),1)
+vis1ERPFCwindows(pliW, ~fcTh(stageIXs+1,:), win(1),1)
+
+
+[envZSW, envZSWsd]=getERP_SUBJwindow(enveZS,stageIXs,win, onsetN, xR, yR,subj);
+visERPwindows(envZSW(:,map.raw2cross,:), win(1),1)
+
+envZSW = envZSW(:,map.raw2cross,:);
+envZSWsd = envZSWsd(:,map.raw2cross,:);
+
+info = {'from: saveAcrossTrialsPLI4paper.m, PLI and z-scored envelopes like an erp'};
+
+%save('/dataSave/PLI8ENVbumperp.mat','win','pliW','pliWsd','fcTh','envZSW', 'info')
+
+% save('/dataSave/PLIacrossTials.mat','phFC','stageIXs','onsetN','xR','yR','subj','-v7.3')
+

+ 24 - 0
durationProcStages.m

@@ -0,0 +1,24 @@
+% duration of processing stages
+
+clear all
+close all
+clc
+
+dataPath = './data';
+dataFile = '/hmmBu4RroiAveMap3StagBL.mat';
+load([dataPath dataFile],'proBump','nPca', 'x','y','cond', 'subj','flatGam')
+
+% most likely model: lkhCV [bumps Model, hand Model, Subjects]
+models = 1;
+
+% COMPUTE durations
+mOnset  = cell(models,1); onset   = cell(models,1); %(number of bumps model)
+stages  = cell(models,1); mStage  = cell(models,1); 
+
+for buM = 1:models
+    [mOnset{buM,1}, onset{buM,1}, stages{buM,1}, mStage{buM,1}] = getDurations(proBump{buM},x,y); % proBump cell(3,3)
+end
+
+
+stagesR = stages{1,1};
+save('./dataSave/durationStages.mat', 'stagesR')

+ 49 - 0
files4brainNetViewer.py

@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Sep  4 15:25:17 2020
+
+@author: p277634
+"""
+import numpy as np
+from scipy.io import loadmat
+from itertools import combinations
+
+class BNVfiles():
+    def __init__(self):
+        self.fileNodes  = './data/Desikan-Killiany68_myTest.txt'
+        self.fileMapper = './data/mapFS2DTI.mat'
+        self.pathsave   = './dataSave/' 
+        self.nroi       = 68
+        self.pairs      = np.array(list(combinations(range(self.nroi),2)), dtype=np.int32)
+        self._getMapper()
+        
+    def _getMapper(self):
+        self.map    = loadmat(self.fileMapper)['map']['raw2cross'][0][0][0]-1
+               
+    def doNodes(self, colors=None, sizes=None, name='nodes'):
+        """ Desikan parcelation by default
+        @ colors: [nRoi], scale for coloring nodes
+        @ sizes: [nROi], scale for the size of nodes """
+
+        A = np.loadtxt(self.fileNodes)
+        if colors is not None:
+            A[:,3] = colors[self.map]
+            
+        if sizes is not None:
+            A[:,4] = sizes[self.map]
+        
+        np.savetxt(self.pathsave+name + '.node', A)
+            
+    def doEdges(self, edges, weights, name='edges'):
+        """ @ edges: directed FC with sign
+            @ weights: strength of edge """
+        ## Edges are transposed with respect to brain connectivity toolbox
+        fc = np.zeros((self.nroi, self.nroi))
+        for i in range(self.pairs.shape[0]):
+            if edges[i] > 0:
+                fc[self.map[self.pairs[i,0]], self.map[self.pairs[i,1]]] = weights[i]
+            elif edges[i] < 0:
+                fc[self.map[self.pairs[i,1]], self.map[self.pairs[i,0]]] = weights[i]
+        
+        np.savetxt(self.pathsave+name + '.edge', fc.T)
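+
+# Minimal usage sketch (illustrative values only; 2278 = 68*67/2 ROI pairs):
+# bnv = BNVfiles()
+# bnv.doNodes(colors=np.random.rand(68), sizes=np.random.rand(68), name='exampleNodes')
+# bnv.doEdges(edges=np.sign(np.random.randn(2278)), weights=np.random.rand(2278),
+#             name='exampleEdges')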

+ 101 - 0
findBoundsProcesStages.m

@@ -0,0 +1,101 @@
+% Applies HSMM-MVPA as in Anderson 2016 to find the boundaries of cognitive
+% processing stages
+
+
+bumpM   = 4; % number of bumps, 4 bumps == 5 stages
+
+nBuMax  = max(bumpM);
+nModel  = length(bumpM);
+nPca    = 32; % 32 PCA -> more than 95% variance, 25 -> more than 85%; 20 -> 80%  variance on PCA 
+nSj     = 18;
+rep     = 150;
+shape   = 2; % gamma distribution, shape parameter
+roiDataR = 'nScoreAveR'; % 'nScoreAve' or 'nScorePca'
+
+dataPath = './data';
+dataRead = '/vars4hsmmBL_R.mat';
+
+roiTypeR  = ['roi' roiDataR(end-3:end)];
+roiTypeR(end) = [];
+saveName = ['/hmmBu' num2str(bumpM) 'R' roiTypeR 'Map3StagBL.mat']; 
+
+
+addpath('./hsmm/');
+
+
+info = {['test ', num2str(bumpM),' bumps n repetitions one models: R; map 4 stage, PCA cov by subject, Baseline']};
+
+load([dataPath, dataRead], roiDataR, 'condR','xR','yR','subjtR')
+
+% right hand
+data_   = eval(roiDataR);
+data    = single(data_(:,1:nPca));
+x       = xR;
+y       = yR;
+
+condR_ = condR;
+condR_(logical( condR==2 | condR==3 )) = 1; % fan1-target
+condR_(logical( condR==4 | condR==5 )) = 2; % fan2-target
+condR_(logical( condR==6 | condR==7 )) = 3; % fan1-foil
+condR_(logical( condR==8 | condR==9 )) = 4;  % fan2-foil
+
+cond  = condR_;
+subj = subjtR;
+% left hand
+
+clear data_ roiDataR xR yR condR condR_ subjtR 
+
+% define map settings, 3rd stage (retrieval) gets a different gamma
+map = [1, 1, 1, 1, 1;     
+       1, 1, 2, 1, 1;
+       1, 1, 3, 1, 1;
+       1, 1, 4, 1, 1];  
+
+gamIni  = repmat([2 200], nBuMax+1,1);
+magIni  = -1 + 2*rand(nPca, nBuMax, rep); 
+
+% bump kernel correlation, removed form inside HSMM
+data = calcBumps(data);
+lkhr = zeros(rep, nModel); % [repetitions, models (number of bumps)]
+parpool(30);
+parfor r = 1:rep
+    % do HSMM
+    for m = 1:nModel % iterate models
+        [lkhr(r,m),~,~,~]=hsmmEEG50CondMapAllDNew(data,cond,magIni(:,1:bumpM(m),r),gamIni(1:bumpM(m)+1,:),1,x,y,map,shape);
+    end
+end
+disp('End of random initializations')
+
+% recompute best models
+lkh     = cell(1,nModel); % hand, model(nBump)
+bumpMag = cell(1,nModel);
+flatGam = cell(1,nModel);
+proBump = cell(1,nModel);
+for m = 1:nModel
+        [~,iBest]  = max(lkhr(:,m));
+        [lkh{1,m},bumpMag{1,m},flatGam{1,m},proBump{1,m}] = hsmmEEG50CondMapAllDNew(data,cond,magIni(:,1:bumpM(m),iBest),gamIni(1:bumpM(m)+1,:),1,x,y,map,shape);
+end
+
+% LOOCV, leave-one-out cross validation
+
+lkhCV = zeros(nSj,nModel);
+parfor sj = 1:nSj
+    for m = 1:nModel % iterate models  
+        sTrain  = logical(subj ~= sj);
+        sTest   = logical(subj == sj);
+        [~, magTrain, gammTrain, ~] = hsmmEEG50CondMapAllDNew(data,cond(sTrain),bumpMag{1,m},flatGam{1,m}, 1 ,x(sTrain),y(sTrain),map,shape);
+        [ lkhCV(sj, m), ~, ~, ~]  = hsmmEEG50CondMapAllDNew(data,cond(sTest), magTrain,    gammTrain,    0 ,x(sTest),y(sTest),map,shape);
+    end
+end
+
+
+%save(['./dataSave' saveName], 'lkhr','lkh','bumpMag','flatGam','proBump','info','nPca', 'rep', 'x','y','cond','lkhCV','subj')
+%format longG
+disp(char(datetime(now,'ConvertFrom','datenum')))
+
+
+delete(gcp('nocreate'))
+
+clear all
+close all

+ 164 - 0
findInitailCond.py

@@ -0,0 +1,164 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Jun 29 13:20:23 2020
+
+@author: p277634
+"""
+import scipy.io as sio
+import pygmo as po
+import numpy as np
+from itertools import combinations
+import copy
+
+class InitalProblem():
+    def __init__(self, mIx=0):
+        self.mIx        = mIx # model index
+        self.nroi       = 68
+        self.pathData   = './data/'
+        self.efcData    = 'FCstagesDTIrois.mat'
+        self.iniData    = 'partialInitialCondit.mat'
+        self.piVar      = np.pi/8
+        self.pairs      = np.array(list(combinations(range(self.nroi),2)), dtype=np.int32)
+        self.withNoise  = True
+        self.noise      = 0.01
+        self.nModel     = 50
+        self.sizePop    = 300
+        self.genera     = 1000  
+        self.pathSave   = './dataSave/'
+        self.nameSave   = 'OptiNoisy0p01IniState.npz'
+        self.info       = """intial amplitude and cosistent (ITPC) pahses defined from 10 samples backwards of the first bump\n
+                            with file boundIniState_2.m. ROIS are ordered as in DTI\n
+                            edgeSt has the edges and onsets of simulated FC stages in seconds \n
+                            edgeSt(n stages, [low edge upper edge, onset,]) \n'
+                            phase values from significant inter trial phase coherence were given and the resta were zeros \n.
+                            the file findInitalCong uses CMAES optimzer to find 50 possible inital states of the model"""
+    
+        self._importFCstage()
+        self._getEmpiricalInitialState()
+
+    def _importFCstage(self):        
+        filepath    = self.pathData + self.efcData
+        self.eFC    = np.float32(sio.loadmat(filepath)['fc1d'][1,:])
+        self.eFCs   = np.sign(self.eFC)
+        self.neFCs  = np.sum(np.abs(self.eFCs), axis=-1)
+        
+        
+    def _getEmpiricalInitialState(self):
+        # load initial conditions
+        loaddata    = self.pathData + self.iniData
+        iniSta      = sio.loadmat(loaddata) 
+        self.iniPhi0    = iniSta['iniState'][:,0]
+        self.iniAmp1    = iniSta['iniState'][:,1]
+        self.iniAmp2    = iniSta['iniState'][:,2]
+        self.edgeSt     = iniSta['edgeSt']
+        self.nIphi      = np.sum(self.iniPhi0 != 0)
+        self.missPhi    = self.nroi - self.nIphi
+        self.noIniPhi   = self.iniPhi0 == 0        
+        self.hasIniPhi  = self.iniPhi0 != 0 
+        if self.withNoise:
+            self.noiseIniPhase()
+        else:
+            self.iniPhi     = copy.deepcopy(self.iniPhi0)
+            self.mPhi       = np.mean(self.iniPhi[self.iniPhi.nonzero()])
+
+        
+    def get_bounds(self):
+        lowboud = [self.mPhi - self.piVar]*self.missPhi 
+        upbound = [self.mPhi + self.piVar]*self.missPhi 
+        return (lowboud,upbound)
+    
+    def costFunction(self, fci):
+        return  -1 * np.sum(fci * self.eFCs) / self.neFCs
+    
+    def noiseIniPhase(self):
+        iniPhi     = copy.deepcopy(self.iniPhi0)
+        self.iniPhi     = np.repeat(iniPhi[:,np.newaxis], self.nModel, axis=1)
+        self.iniPhi[self.hasIniPhi,:] += self.noise * np.random.randn(self.nIphi, self.nModel)
+        self.mPhi      = np.mean(self.iniPhi[self.iniPhi[:,0].nonzero(),:], axis=1)
+
+    
+    def fullfilConstrin(self, fci):
+        # unequal signs are converted to positive values that violate the constraints
+        # constraint fulfilment is associated with a negative value (pygmo requirement)
+        match = -1 * fci * self.eFCs 
+        return match[self.eFCs != 0]
+     
+    def fitness(self, x):
+        phis = copy.deepcopy(self.iniPhi[:, self.mIx])
+        phis[self.noIniPhi] = x
+        # fci = np.sign(phis[self.pairs[:,0]] - phis[self.pairs[:,1]])
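+        # signed phase-difference FC: sign of the imaginary part of
+        # exp(i*phi_i) * conj(exp(i*phi_j)) for every ROI pair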
+        fci = np.sign( np.imag( np.exp(1j*phis[self.pairs[:,0]]) * np.conj(np.exp(1j*phis[self.pairs[:,1]]) )))
+
+        # cnstr = self.fullfilConstrin(fci)
+        fit = self.costFunction(fci)
+        # return np.insert(cnstr, 0, fit)
+        return np.array([fit])
+    
+    def gradient(self, x):
+        return po.estimate_gradient_h(lambda x: self.fitness(x), x)
+    
+    def putNoiseAmpli(self):
+        self.iniAmp1  = np.repeat(self.iniAmp1[:,np.newaxis], self.nModel, axis=1)
+        self.iniAmp2  = np.repeat(self.iniAmp2[:,np.newaxis], self.nModel, axis=1)
+        self.iniAmp1 += self.noise * np.random.randn(self.nroi, self.nModel)
+        self.iniAmp2 += self.noise * np.random.randn(self.nroi, self.nModel)  
+        self.iniAmp1[self.iniAmp1<0.02] = 0.02 
+        self.iniAmp2[self.iniAmp2<0.02] = 0.02 
+        
+    
+        
+    def nInitialStates(self):
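+        # for every candidate initial state, let CMA-ES evolve the unknown (zero) phases
+        # so that the signed phase-difference FC best matches the empirical stage FC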
+        self.phi    = copy.deepcopy(self.iniPhi)
+        self.fitpli = np.zeros(self.nModel) 
+        for iXm in range(self.nModel):
+            # prob = po.problem(udp=self.__class__(mIx=iXm))
+            pop = po.population(prob = FindIniState(self, iXm), size = self.sizePop)
+            algo = po.algorithm(uda=po.cmaes(gen=self.genera,force_bounds=True))
+            pop = algo.evolve(pop)
+            self.fitpli[iXm] = pop.champion_f[0]
+            self.phi[self.noIniPhi,iXm] = pop.champion_x
+        self.putNoiseAmpli()
+            
+  
+    def saveIniState(self):
+        ''' info: initial conditions are computed at 10 samples before onset '''
+        data = np.zeros((self.nroi,3,self.nModel), dtype=np.float32)
+        data[:,0,:] = self.phi
+        data[:,1,:] = self.iniAmp1
+        data[:,2,:] = self.iniAmp2
+        saveto = self.pathSave + self.nameSave
+        np.savez(saveto, info=self.info,iniState=data,  edgeSt=self.edgeSt, piVar=self.piVar, noise=self.noise, fit=self.fitpli)
+        
+
+
+class FindIniState():
+    def __init__(self, ini, mIx):
+        self.ini = ini
+        self.mIx = mIx
+        # print('Here')
+        
+    def get_bounds(self):
+        lowboud = [self.ini.mPhi[0][self.mIx] - self.ini.piVar]*self.ini.missPhi 
+        upbound = [self.ini.mPhi[0][self.mIx] + self.ini.piVar]*self.ini.missPhi
+        return (lowboud, upbound)
+        
+    def fitness(self, x):
+        phis = copy.deepcopy(self.ini.iniPhi[:, self.mIx])
+        phis[self.ini.noIniPhi] = x
+        fci = np.sign( np.imag( np.exp(1j*phis[self.ini.pairs[:,0]]) * np.conj(np.exp(1j*phis[self.ini.pairs[:,1]]) )))
+        fit = self.costFunction(fci)
+        # return np.insert(cnstr, 0, fit)
+        return np.array([fit])
+    
+    def costFunction(self, fci):
+        return  -1 * np.sum(fci * self.ini.eFCs) / self.ini.neFCs
+    
+if __name__ == "__main__":
+    
+    ini = InitalProblem()
+    
+    ini.nInitialStates( )
+    ini.saveIniState()
+    
+   

+ 24 - 0
getERP_SUBJwindow.m

@@ -0,0 +1,24 @@
+function [mWin,sdWin]=getERP_SUBJwindow(data,iXstg,win, onsets, x, y, subj)
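+% Average DATA in a window of WIN(1) samples before and WIN(2) samples after each
+% stage onset (columns of ONSETS selected by IXSTG), per trial and then per subject.
+% Returns the mean over subjects (mWin) and the between-subject standard error
+% (sdWin, computed on the absolute values).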
+nroi    = size(data,2);
+nTr     = length(x);
+nStg    = length(iXstg);
+nSj     = max(subj);
+N       = abs(win(1)) + win(2) + 1;
+
+dataWin = zeros(N, nroi,nStg, nTr, 'single');
+for tr = 1:nTr,
+    dataTr = data(x(tr):y(tr),:);
+    for iSt = 1:nStg        
+        idXpos = onsets(tr,iXstg(iSt))-win(1) : onsets(tr,iXstg(iSt))+win(2);
+        idXpos(idXpos<1) = 1;
+
+        data_ = dataTr(idXpos, :) ;       
+        dataWin(:,:,iSt,tr) = data_;
+    end
+end
+dataSj_ = zeros(N, nroi, nStg, nSj, 'single');
+for sj = 1:nSj,    
+    dataSj_(:,:,:,sj) = squeeze( mean(dataWin(:,:,:, subj==sj),4));
+end
+mWin  = squeeze(mean(dataSj_,4));
+sdWin = squeeze(std(abs(dataSj_),[],4)) ./ sqrt(nSj);

+ 53 - 0
gridSearch_G_L.py

@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Feb 23 10:04:55 2021
+
+@author: Oscar Portoles
+"""
+
+import numpy as np
+import myKAOctrl_basic as udp
+import time
+import matplotlib.pyplot as pl
+import sys
+
+
+tRun    = 1000
+tDisc   = 200
+nModel  = 25
+
+localC  = np.arange(0.1, 1.6, 0.1)
+globalC = np.arange(0,2.,0.05)
+
+
+pathSave = './dataSave/Globals_v5ms_grid_globalC_co_.npz'
+
+gMet = np.zeros((len(localC), len(globalC)))
+lMet = np.zeros((len(localC), len(globalC)))
+gMetT = np.zeros((len(localC), len(globalC), (tRun-tDisc)*100, nModel), dtype=np.float16)
+
+
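+# Grid search over the local (L) and global (G) couplings: for every (L, G) pair run
+# nModel simulations, then summarize global metastability (std over time of the
+# Kuramoto order parameter) and local metastability (std over time of the ROI
+# envelope amplitudes).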
+for iXlC, lC in enumerate(localC):
+    for iXgC, gC in enumerate(globalC):
+        kao = udp.KAOmodel()
+        kao.tMax        = tRun
+        kao.tDiscard    = tDisc
+        kao.nModel      = nModel
+        kao.globalC     = np.float64(gC)
+        kao.localC      = np.float64(lC)
+        kao.interHsc    = 1
+        kao.velocity    = 5 
+        kao.iniCond     = 1 # random
+        kao.runModel()
+        z_abs       = udp.KAOmodel._absZ(kao.z[:, int(tDisc*kao.fs):,:])
+        z_norm      = kao.z[:, int(tDisc*kao.fs):,:] / z_abs # makes all moduli equal to one
+        # global order parameter with all local amplitudes normalized to one at time t
+        kop     = np.abs( np.mean( z_norm, axis = 0 ))
+        gMetT[iXlC, iXgC,:,:]      = np.float16(kop) 
+        # orders, metaL = kao.doKuramotoOrder(kao.z[:, int(tDisc*kao.fs):,:])
+        gMet[iXlC, iXgC] = np.mean(np.std(kop, axis=0))
+        lMet[iXlC, iXgC] = np.mean(np.std(z_abs,axis=1))        
+
+np.savez(pathSave, gMet=gMet, lMet=lMet, localC=localC, globalC=globalC, gMetT=gMetT) 
+

+ 70 - 0
metaAfterTask.py

@@ -0,0 +1,70 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Feb 23 10:09:40 2021
+
+@author: Oscar Portoles
+"""
+
+import numpy as np
+import sys
+import visArchis_seq as v
+# import optimProbs as oudp
+import modelStages as udp
+import scipy as sp
+from itertools import combinations
+
+
+tSimu  = 400
+saveTo = './dataSave/metaFC1000afterPulse_P040_G0p15L0p6.npz'
+
+
+# Load data
+d = v.LoadData()
+d.nameModel = 'stgSequence_Lopt_allPu1500_wE_P040_optIni_v5_fixG0p15L0p6_Stg'
+d.nameEnding =''
+d.stages = [1,2,3,4]
+m = d.getBestModel()
+pairs = np.array(list(combinations(range(m.nroi),2)), dtype=np.int32)
+
+# get pulses and best fitness
+pulses = np.zeros((m.nroi,m.nStg))
+fits   = np.zeros(m.nStg)
+for i in range(m.nStg):
+    pulses[:,i] = m.logs[i][0][0]
+    fits[i] = m.logs[i][0][3]
+
+metaG, metaL,sim2eFC = [None]*4, [None]*4, [None]*4
+metaGtot, metaLtot = np.zeros(4), np.zeros(4)
+matchFCs = [None]*4
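+# For each cumulative sequence of stages 1..k, run the pulsed model for tSimu seconds
+# and measure the global and local metastability of the activity as well as its
+# similarity to the empirical FC of every simulated stage.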
+for iX in range(4):
+    stages = list(range(iX+1))
+    kao = udp.KAOmodel(stageIx=stages)
+    # use model with pulses
+    kao.iniCond = 2
+    kao.tMax = tSimu
+    kao._setHistDelayAndContainers()
+    kao._edgesImpulse()  
+    kao._getEmpiReativEnv()
+    kao.doNoise(variance=0)
+    kao.pulse = pulses[:,stages]
+    # kao.pulse = np.zeros((68,1))
+    kao.runModel_Pulsein_sequence()
+    
+    zds = kao.downsampleModel(kao.z)
+    # metastability
+    metaG_ = np.abs(np.mean(np.exp(1j*np.angle(zds)),axis=0))
+    metaGtot[iX] = np.mean(np.std(metaG_,axis=0))
+    metaG[iX] = np.mean(metaG_, axis=1)
+    zdsA  = udp.KAOmodel._absZ(zds)
+    metaL[iX] = zdsA
+    metaLtot[iX] = np.mean( np.mean(np.std(zdsA,axis=1),axis=0) ) 
+    # match to last FC state
+    csi     = np.sign( np.imag(zds[pairs[:,0],:,:] * np.conjugate(zds[pairs[:,1],:,:])) )
+    mcsi    = np.sign( np.mean(csi, axis=2) ) # mean over models
+    sim2eFC_= np.ones((len(stages), mcsi.shape[1]),dtype=np.float32)
+    for i in range(len(stages)):
+        sim2eFC_[i,:] = np.float32(np.sum( kao.eFCs[i,:] * mcsi.T, axis=1) / kao.neFCs[i])
+    sim2eFC[iX] = sim2eFC_
+        
+np.savez(saveTo, sim2eFC=sim2eFC, metaG=metaG,metaL=metaL,metaGtot=metaGtot,metaLtot=metaLtot,pulses=pulses,fitSt=fits)

+ 536 - 0
modelStages.py

@@ -0,0 +1,536 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Feb 22 15:18:09 2021
+
+@author: Oscar Portoles
+"""
+
+import numpy as np
+import numba as nb
+import scipy.stats as st
+import scipy.io as sio
+import time
+from itertools import combinations
+import copy
+
+class WBLSmodel():
+    """ whole-brain large-scale model and analysis methods """
+    def __init__(self,stageIx=0,vel=5,zh=None):
+        """ @ stageIx: int or list, with stage's indexes to simulate
+            @ vel: float, velocity of spike-propagation velocity [m/s]
+            @ zh: ndim array, iniital history of models pahses and amplitudes
+        """
+        self.zhist    = zh                  # history of initial conditions
+        self.localC   = np.float64(0.6)     # default Local coupling, L
+        self.globalC  = np.float64(0.15)    # default Global coupling G
+        self.velocity = np.float32(vel)     # velocity [meter / second]
+        self.interHsc = np.float32(1.0)     # scaling factor between hemispheres
+        self.tMax     = np.float32(1.15)    # time of simulation
+        self.fs       = np.float32(100.0)   # sampling frequency for FC
+        self.omega    = np.float64(6)       # Central natural frequency of ROIs [Hz] 
+        self.dlt      = np.float64(1)       # Spread of natural frequencies
+        self.dt       = np.float64(1e-3)    # integration step
+        self.stageSim = self._setStageSim(stageIx)
+        self.iniCond  = 2    # Initial conditions - 1: random (fixed seed), 2: empirical initial state, 3: nModel identical models for testing, 4: empirical initial state over the 3rd dimension
+        self.log      = []
+        self.widthPulse = 0.030     # [seconds] duration of the pulse
+        self.offMidBump = 0.01      # offset relative to the middle point of a bump [sec]
+        self.preBumpWin = 0.05      # 50 ms, as long as a bump [sec]
+        self.bumpDur    = 0.05      # duration of bump [sec]
+        self.pathData = './data/'
+        self.nameDTIm = 'mCons9Anato.mat'                   # structural connectome
+        self.nameIni  = 'OptiNoisy0p01IniState.npz'         # initial condition of the first stage, from pre-visual encoding + noise
+        self.efcData  = 'FCstagesDTIrois.mat'               # MEG within-stage dpFC
+        self.mapFile  = 'mapFS2DTI.mat'                     # mapping from the FreeSurfer layout of ROIs to our visualization
+        self.nameEnvs = 'meanEnvelopeByStage.mat'           # relative change of envelope amplitude at the onset of stages
+        self.nModel   = np.int32(25)        # number of models with different initial conditions to run simultaneously
+        self.oneModel = False
+        self.iniSeed  = 1                   # Seed for random initial conditions
+        self._getAnatomicNetw()
+        self._getEmpiricalInitialStateNP()
+        self._importFCstage()
+        self.setLocalParam(omega=self.omega, dlt=self.dlt)
+        self.setNumbaThreaths()
+        
+    def setNumbaThreaths(self, n=1):
+        nb.config.NUMBA_NUM_THREADS = n
+                
+    def get_mylogs(self):
+        return self.log
+    
+    def _setStageSim(self, stSim):
+        if isinstance(stSim, list): 
+            return np.array(stSim)
+        else:
+            return np.array([stSim])
+
+    def _getAnatomicNetw(self): 
+        # load anatomical data
+        loaddata    = self.pathData + self.nameDTIm
+        dti         = sio.loadmat(loaddata)     # C: structural connectivity, D: distance between areas.
+        self.D      = dti['mcdA']                  # Distances between nodes
+        self.anato  = dti['mcwA'].astype(np.float64) # Structural/anatomical network
+
+        self.C          = np.shape(self.D)[1]          # number of nodes/brain areas
+        self.pairs      = np.array(list(combinations(range(self.C),2)), dtype=np.int32)
+        self.nFCs       = self.pairs.shape[0]
+        self.pairsC     = np.vstack((self.pairs, np.fliplr(self.pairs)))
+        # Distance to meters
+        self.D      = self.D / 1000.0       
+
+    def _getEmpiricalInitialStateNP(self):
+        # load initial conditions
+        loaddata    = self.pathData + self.nameIni
+        iniSta      = np.load(loaddata,allow_pickle=True) 
+        self.eIniPhi       = iniSta['iniState'][:,0,:]
+        self.eIniAmp1      = iniSta['iniState'][:,1,:]
+        self.eIniAmp2      = iniSta['iniState'][:,2,:]
+        self.statesT       = iniSta['edgeSt'][self.stageSim, :2]
+        bump               = iniSta['edgeSt'][self.stageSim,2]         
+        preBu              = np.array([bump - self.bumpDur/2 - self.preBumpWin, bump - self.bumpDur/2])
+        atBu               = np.array([bump - self.bumpDur/2, bump + self.bumpDur/2])       
+        if self.stageSim.size == 1 :
+            if self.stageSim > 0 :
+                offset_stage_before = iniSta['edgeSt'][self.stageSim-1, 1] -self.dt
+                self.statesT    = self.statesT - offset_stage_before
+                bump            = bump - offset_stage_before
+                preBu           = preBu - offset_stage_before
+                atBu            = atBu - offset_stage_before     
+            
+        self.edges_n       = np.int32(self.fs * self.statesT) 
+        self.edges_dt      = np.int32((1/self.dt) * self.statesT) 
+        self.nStates       = self.statesT.shape[0] 
+        self.tMax          = self.statesT.max() + 1/self.fs
+        self.bump_n        = np.int32(self.fs * bump)
+        self.bump_dt       = np.int32((1/self.dt)  * bump)
+        self.preBu_dt      = np.int32(preBu / self.dt)
+        self.atBu_dt       = np.int32(atBu / self.dt)
+        
+    def _getEmpiReativEnv(self):
+        loaddata    = self.pathData + self.nameEnvs
+        dti         = sio.loadmat(loaddata) 
+        self.eRelEnv  = dti['envRel'][:, self.stageSim] # [roois, stages]  
+               
+    def setLocalParam(self, omega=6, dlt=1):
+        omga_   = 2*np.pi * np.float64(omega) * self.dt
+        dlt_    = np.float64(dlt) * self.dt        
+        self.locaDyn = np.complex64(-dlt_ + 1j* omga_)
+        
+    def _importFCstage(self):        
+        filepath    = self.pathData + self.efcData
+        self.eFC    = np.float32(sio.loadmat(filepath)['fc1d'][2 + self.stageSim,:])
+        self.eFCs   = np.sign(self.eFC)
+        self.neFCs  = np.sum(np.abs(self.eFCs), axis=-1)
+        self.inv_neFCs = 1.0 / self.neFCs
+       
+    @nb.vectorize([nb.float32(nb.complex64, nb.complex64)],cache=True,target='cpu',nopython=True)
+    def _csd(zi, zj):
+        csd = zi * np.conjugate(zj)        # sample phase differences sign
+        csdi = np.imag(csd)
+        return  np.sign( csdi )        
+
+    def dPLI3d_v(self, z):
+        # PLI, cross spectral density
+        z_i = z[self.pairs[:,0], self.edges_n[0,0]:self.edges_n[-1,-1], :] 
+        z_j = z[self.pairs[:,1], self.edges_n[0,0]:self.edges_n[-1,-1], :] 
+        # print('z_i shape: ', z_i.shape[1])
+        # print('state chope: ',(self.edges_n[0,0], self.edges_n[-1,-1]))
+        edges_n_0 = self.edges_n - self.edges_n[0,0]
+        # sign phase difference
+        siCsd = WBLSmodel._csd(z_i, z_j)
+        self.pli = np.empty((self.nStates, self.nFCs), dtype=np.float32)
+        # PLI over stages
+        for i in range(self.nStates):
+            # mean PLI over stage samples and model
+            self.pli[i,:] = np.mean(siCsd[:, edges_n_0[i,0]:edges_n_0[i,1],:], axis=(1,2))
+        
+    def similarityAllPLIs(self):
+        # faster than a numba-vectorized function
+        sigSimFC = np.sign(self.pli)  
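+        # (matching - mismatching) signs over the significant empirical edges,
+        # normalized by their number, i.e. a similarity in [-1, 1] per stage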
+        return np.sum( self.eFCs * sigSimFC, axis=1) / self.neFCs
+    
+    def similarityAllPLIsWenergy(self):
+        # faster than a numba-vectorized function
+        sigSimFC= np.sign(self.pli)  
+        simil   = np.sum( self.eFCs * sigSimFC, axis=1) / self.neFCs
+        energy  = np.sum(np.abs(self.pulse)) 
+        return simil - energy * self.inv_neFCs / self.maxEner
+
+    def getPreAtBumpEnv(self):
+        # change to z-score of the whole trial??
+        self.preBu_env  = np.zeros((self.C,self.nModel,self.nStates)) # ,int(self.bumpDur*self.dt)
+        self.atBu_env   = np.zeros((self.C,self.nModel,self.nStates))
+        for iXs in range(self.nStates):
+            z_pre   = self.z[:, int(self.preBu_dt[0,iXs]+self.maxDlay):int(self.preBu_dt[1,iXs]+self.maxDlay), :]
+            if self.preBu_dt[0,iXs]+self.maxDlay < 0:
+                z_pre   = self.z[:, :int(self.preBu_dt[1,iXs]+self.maxDlay), :]
+            if z_pre.shape[1] < 5:
+                z_pre = np.nan
+            z_at    = self.z[:, int(self.atBu_dt[0,iXs] +self.maxDlay):int(self.atBu_dt[1,iXs] +self.maxDlay), :]
+            self.preBu_env[:,:,iXs] = np.mean(WBLSmodel._absZ( z_pre ), axis=1)  
+            self.atBu_env[:,:,iXs]  = np.mean(WBLSmodel._absZ( z_at ), axis=1)   
+     
+    
+    def getScorrelRelEnv(self):
+        self.sCor = np.zeros(self.nStates)
+        for iXs in range(self.nStates):
+            self.sCor[iXs], pval = st.spearmanr( self.eRelEnv[:,iXs], self.relEnv[:,iXs])         
+    
+    def similarityPLIsWenergySperm(self):
+        similFC       = np.sum( self.eFCs * np.sign(self.pli), axis=1) / self.neFCs
+        energyCoef  = np.sum(np.abs(self.pulse))  / self.maxEner
+        # print('fc: ',similFC[0],' energy: ', energyCoef ,' sCor: ', self.sCor)
+        return similFC[0] - energyCoef * (1-self.sCor) * self.inv_neFCs 
+
+    def relativeEnvBuPre(self):
+        self.getPreAtBumpEnv()
+        self.relEnv = np.mean( (self.atBu_env - self.preBu_env) / self.preBu_env , axis=1) # mean over models of relative change
+    
+    def getAnatoCoupling(self,kG,kL):
+        """Get anatomical network with couplings"""
+        # normalize the adjacency matrix
+        self.anatoNorm  = self.anato / np.mean(self.anato[~np.identity(self.C,dtype=bool)])   # normalize structural network to a mean = 1
+        kS = self.anatoNorm * kG / self.C        # Global coupling
+        np.fill_diagonal(kS,kL)              # Local coupling
+        return np.float64(kS)
+    
+    def getAnatoCouplingIHS(self,kG,kL,ihs):
+        """Get anatomical network with couplings and inter-hemisphere scaling"""
+        self.anatoScaleNormali(ihs)       # IHS and normalization adjacency matrix    
+        kS = self.anatoSC * kG / self.C        # Global coupling
+        np.fill_diagonal(kS,kL)              # Local coupling
+        return np.float64(kS)
+    
+    def anatoScaleNormali(self, h):
+        self.anatoSC = np.zeros((self.C,self.C))
+        hC = int(self.C/2)
+        # scale from one hemisphere to another one
+        self.anatoSC[hC:,:hC] = self.anato[hC:,:hC] * h
+        self.anatoSC[:hC,hC:] = self.anato[:hC,hC:] * h
+        # within-hemisphere connections
+        self.anatoSC[:hC,:hC] = self.anato[:hC,:hC]
+        self.anatoSC[hC:,hC:] = self.anato[hC:,hC:]
+        # normalization
+        self.anatoSC  = self.anatoSC / np.mean(self.anatoSC[~np.identity(self.C,dtype=bool)])   # normalize structural network to a mean = 1
+
+    def _doCouplingLSBMsimu(self):
+        """ get coupling matris with predefined local global cuplings ans scale by dt and 1/2
+            So, it is ready to be used in teh the simulations """
+        if self.interHsc == 1.0:
+            K_ = self.getAnatoCoupling(self.globalC, self.localC)
+        else:
+            K_ = self.getAnatoCouplingIHS(self.globalC, self.localC, self.interHsc)
+        ksim = np.float32( 0.5 * K_ * self.dt) 
+        self.Ksim = np.repeat(ksim[:,:,np.newaxis],self.nModel,axis=2)
+    
+    def getDelays(self,vel):
+        """Return maximum delay and delay steps in samples"""
+#        dlay        = self.D / (1000.0 * vel)                # [seconds] correct from mm to m
+        self.dlay   = self.D / vel           # [seconds] correct from mm to m
+        self.dlayStep    = np.around(self.dlay / self.dt).astype(np.int32)  # delay on steps backwards to be done
+        self.maxDlay     = np.int64(np.max(self.dlayStep))                  # number of time steps for the longest delay
+        self.iXc = np.tile( np.arange(self.C, dtype=np.int32), (self.C,1)).T  # index for ROIs 
+
+    @nb.vectorize([nb.float32(nb.complex64)],cache=True,target='cpu',nopython=True)    
+    def _absZ(z):
+        return np.abs(z) # modulus (envelope amplitude) of z
+    
+    # @profile
+    def doKuramotoOrder(self,z):
+        z_norm      = z / self.z_abs # makes all moduli equal to one
+        # global order parameter with all local amplitudes normalized to one at time t
+        orderD      = np.abs( np.mean( z_norm, axis = 0 )) 
+        # temporal mean and std, over models
+        orderSD     = np.mean( np.std( orderD, axis=0 ) ).astype(np.float16)
+        order1      = np.mean( np.mean( orderD, axis=0 ) ).astype(np.float16)
+        # mean local order over rois, and then over time, over models
+        orderL      = np.mean( np.mean(np.mean( self.z_abs, axis = 0 ), axis=0) ).astype(np.float16)
+        # mean local variation (metastability), over models
+        metaL   = np.float16( np.mean(np.std( self.z_abs, axis=1), axis=1) )
+        orders  = np.array([orderL, order1, orderSD])
+        return orders, metaL
+                        
+    def impulsePerturb(self):
+        """ Local couplings perturbation: 
+            (L + pulse) * Z_() = Z'_() wehre z_() is couplin variable in model
+            becomes -> (1 + pulse/L) * z_() = Z'_() \ reshape to 2-dimensions"""   
+        # print('impulse ID: ', np.sum(self.pulse**2))
+        self.impulse = (1 + self.pulse.reshape(self.C, self.nStates) / self.localC).astype(np.float32)
+
+    def _edgesImpulse(self):   
+        self.edgePulse = np.zeros((self.nStates, 2))
+        for i in range(self.nStates):
+            self.edgePulse[i, 0] = self.bump_dt[i] - (self.offMidBump + 0.5*self.widthPulse) * 1/self.dt
+            self.edgePulse[i, 1] = self.bump_dt[i] - (self.offMidBump - 0.5*self.widthPulse) * 1/self.dt
+  
+    @nb.vectorize([nb.complex64(nb.complex64, nb.complex64, nb.complex64)],cache=True,target='cpu',nopython=True)
+    def _sumPartsASdt(param, z_n, couplingSum):
+        return z_n + param * z_n - couplingSum
+        
+    @nb.vectorize([nb.complex64(nb.float32, nb.complex64, nb.complex64)],cache=True,target='cpu',nopython=True)
+    def _coupling(kS, zn2, zp_del):#, res):
+        # zn2 = zn * zn
+        z_pro = np.conjugate(zp_del) * zn2
+        return kS * (z_pro - zp_del)
+    
+    @nb.vectorize([nb.complex64(nb.complex64)],cache=True,target='cpu',nopython=True)   
+    def _zn2(z):
+        return z * z
+
+    def _KMAOcommuParZnV(z,maxDlay,dlayStep,fs,dt,kS, iXc,localPara):
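+        # Euler integration of the delay-coupled oscillator model: at every step take the
+        # delayed states zp_del, form the coupling kS*(conj(zp_del)*zn^2 - zp_del) summed
+        # over source ROIs, and update z_{n+1} = z_n + (-dlt + 1j*2*pi*omega)*dt*z_n minus
+        # the summed coupling (the 0.5*dt factor is already folded into kS).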
+        # best timing so far
+        for n in range(maxDlay,z.shape[1]-1):
+            zn      = z[:,n,:]  #           
+            iXdel   = n-dlayStep
+            zp_del  = z[iXc, iXdel,:]
+            zn2     = WBLSmodel._zn2( zn )
+            # zn2_rep = np.repeat(zn2[:,np.newaxis,:], zn.shape[0], axis=1)
+            zn2_rep = np.repeat(zn2[np.newaxis,:,:], zn.shape[0], axis=0)
+            couplin = WBLSmodel._coupling(kS, zn2_rep, zp_del)#, couplin)
+            kzpn    = np.sum(couplin, axis=0)
+            # add differential step
+            z[:,n+1,:]    = WBLSmodel._sumPartsASdt(localPara, zn , kzpn)
+        return z
+    
+    def _KMAOcommuParZnV_NoDel(z,fs,dt,kS,localPara):
+        nMo     = z.shape[2]
+        # best timing so far
+        for n in range(z.shape[1]-1):
+            zn      = z[:,n,:]  #                             
+            zn2     = WBLSmodel._zn2( zn )
+            zn2_rep = np.lib.stride_tricks.as_strided(zn2,(68, 68, nMo),(0, zn2.strides[0], zn2.strides[1]))
+            zp_rep  = np.lib.stride_tricks.as_strided(zn,(68, 68, nMo),( zn.strides[0],0, zn.strides[1]))
+            couplin = WBLSmodel._coupling(kS, zn2_rep, zp_rep)#, couplin)
+            kzpn    = np.sum(couplin, axis=0)
+            # add differential step
+            z[:,n+1,:]    = WBLSmodel._sumPartsASdt(localPara, zn , kzpn)
+        return z 
+#
+    def _KMAOcommuParZnV_in(z,maxDlay,dlayStep,fs,dt,kS, iXc,localPara, impulse, edges_p):
+        # best timing so far
+        iXdiag = np.diag_indices(68, 2)
+        for n in range(maxDlay,z.shape[1]-1):
+            zn      = z[:,n,:]  #           
+            iXdel   = n-dlayStep
+            zp_del  = z[iXc, iXdel,:]
+            zn2     = WBLSmodel._zn2( zn )
+            # zn2_rep = np.repeat(zn2[:,np.newaxis,:], zn.shape[0], axis=1)
+            zn2_rep = np.repeat(zn2[np.newaxis,:,:], zn.shape[0], axis=0)
+            couplin = WBLSmodel._coupling(kS, zn2_rep, zp_del)#, couplin)
+            # index diagonal elements and add pulse in-place
+            if (n >= edges_p[0,0]) and (n <= edges_p[0,1]):
+                couplin[iXdiag] *= impulse[:,0,np.newaxis]               
+            kzpn    = np.sum(couplin, axis=0)
+            # add differential step
+            z[:,n+1,:]    = WBLSmodel._sumPartsASdt(localPara, zn , kzpn)
+        return z
+
+    def _KMAOcommuParZnV_NoDel_in(z,fs,dt,kS,localPara, impulse, edges_p):
+        nMo     = z.shape[2]
+        iXdiag = np.diag_indices(68, 2)
+        # best timing so far
+        for n in range(z.shape[1]-1):
+            zn      = z[:,n,:]  #                             
+            zn2     = WBLSmodel._zn2( zn )
+            zn2_rep = np.lib.stride_tricks.as_strided(zn2,(68, 68, nMo),(0, zn2.strides[0], zn2.strides[1]))
+            zp_rep  = np.lib.stride_tricks.as_strided(zn,(68, 68, nMo),( zn.strides[0],0, zn.strides[1]))
+            couplin = WBLSmodel._coupling(kS, zn2_rep, zp_rep)#, couplin)
+            # index diagonal elements and add pulse in-place
+            if (n >= edges_p[0,0]) and (n <= edges_p[0,1]):
+                couplin[iXdiag] *= impulse[:,0,np.newaxis]  
+            kzpn    = np.sum(couplin, axis=0)
+            # add differential step
+            z[:,n+1,:]    = WBLSmodel._sumPartsASdt(localPara, zn , kzpn)
+        return z   
+    
+    def _KMAOcommuParZnV_in_sequen(z,maxDlay,dlayStep,fs,dt,kS, iXc,localPara, impulse, edges_p):
+        iXdiag = np.diag_indices(68, 2)
+        for n in range(maxDlay,z.shape[1]-1):
+            zn      = z[:,n,:]  #           
+            iXdel   = n-dlayStep
+            zp_del  = z[iXc, iXdel,:]
+            zn2     = WBLSmodel._zn2( zn )
+            zn2_rep = np.repeat(zn2[np.newaxis,:,:], zn.shape[0], axis=0)
+            couplin = WBLSmodel._coupling(kS, zn2_rep, zp_del)#, couplin)
+            # scale the diagonal (local coupling) terms in-place by the pulse of every
+            # stage whose pulse window contains the current sample
+            for s in range(edges_p.shape[0]):
+                if (n >= edges_p[s,0]) and (n <= edges_p[s,1]):
+                    couplin[iXdiag] *= impulse[:,s,np.newaxis]
+            kzpn    = np.sum(couplin, axis=0)
+            # add differential step
+            z[:,n+1,:]    = WBLSmodel._sumPartsASdt(localPara, zn , kzpn)
+        return z  
+       
+    def downsampleModel(self, z):
+        # simple downsampling (there may be aliasing)
+        z = z[:,self.maxDlay+1:,:]     # remove history samples used in the beginning
+        return z[:,::np.int32(1./(self.fs*self.dt)),:]
+           
+    def _setHistDelayAndContainers(self):
+        if self.velocity != 0:
+            self.getDelays(self.velocity)
+        else:
+            self.maxDlay = 0
+        
+        if self.stageSim.size > 1:    
+            self.zIni = self._doNodeContainers(self.maxDlay)
+        elif self.stageSim == 0:
+            self.zIni = self._doNodeContainers(self.maxDlay)
+        elif self.stageSim > 0 :
+            self.zIni = self._doNodeContainerSimHistory()       
+            
+    def _doNodeContainers(self, maxDlay):
+        """ containers with shape [number of ROIs * number of models, number of samples]
+            models are concatenated along rows (vertically)
+        """
+        self.nCM = self.C*self.nModel # number of models and communites
+        eIniAmp1 = self.eIniAmp1[:,:self.nModel].T.ravel()
+        eIniAmp2 = self.eIniAmp2[:,:self.nModel].T.ravel()
+        eIniPhi  = self.eIniPhi[:,:self.nModel].T.ravel()
+        # node's variables
+        r           = np.zeros((self.nCM, int(self.tMax/self.dt + maxDlay)), dtype=np.float32) # node amplitude parameter [C*nModel, Nsamples to integrate]
+        phi         = np.zeros((self.nCM, int(self.tMax/self.dt + maxDlay)), dtype=np.float32) # node phase parameter [C*nModel, Nsamples to integrate]
+        # initial conditions as history for the time delays
+        omegaT      = self.omega * np.tile(np.linspace(0, (maxDlay+1)*self.dt, maxDlay+1, dtype=np.float32), (self.nCM,1))
+        if self.iniCond == 1: # random with fixed seed
+            np.random.seed(self.iniSeed)
+            phiRa       = np.tile(np.random.uniform(-np.pi, np.pi,self.nCM), (maxDlay+1,1)).T
+            phi[:,0:maxDlay+1]  = np.float32(np.remainder(omegaT + phiRa, 2*np.pi))
+            np.random.seed(self.iniSeed+1)
+            r[:,0:maxDlay+1]    = np.tile( np.random.uniform(0.05, 0.95,self.nCM), (maxDlay+1,1)).T
+        elif self.iniCond == 2: # phase: measured initial conditions
+            # phase
+            phiIni = np.tile(eIniPhi, (maxDlay+1,1)).T
+            phi[:,0:maxDlay+1]  = np.float32(np.remainder(omegaT + phiIni, 2*np.pi))
+            # amplitude
+            slope   = (eIniAmp2 - eIniAmp1) / (-0.11/self.dt)    # a = (y2 - y1) / (x2 - x1)
+            interc  = eIniAmp1 - slope*(-0.11/self.dt)              # b = y1 - a * x1  
+            point1  = interc + slope*(maxDlay+1)
+            rh = np.zeros((self.nCM, maxDlay+1), dtype=np.float32)
+            for i in range(self.nCM):
+                rh[i,:] = np.linspace(point1[i], eIniAmp2[i], maxDlay+1)
+            rh[rh<0.05] = 0.05
+            r[:,0:maxDlay+1] = rh
+        elif self.iniCond == 3: # phase: half plane; amplitude: stable 0.5
+            phiOff       = np.tile(np.linspace(0,np.pi,self.C), (int(maxDlay+1),self.nModel)).T 
+            phi[:,0:maxDlay+1]  = np.float32(np.remainder(omegaT + phiOff, 2*np.pi))
+
+            r[:,0:maxDlay+1]    = 0.5*np.ones((self.nCM,maxDlay+1), dtype=np.float32)
+        elif self.iniCond == 4:  # phase: random; amplitude: stable 0.2
+            r   = 0.2*np.ones((self.C, int(self.tMax/self.dt + maxDlay), self.nModel), dtype=np.float32) # node amplitude parameter [C, Nsamples, nModel]
+            phi = np.float32(np.random.randn(self.C, int(self.tMax/self.dt + maxDlay), self.nModel)) # node phase parameter [C, Nsamples, nModel]
+            
+        else:
+            raise ValueError('not defined self.iniCond for such value')
+        if self.oneModel: # for testing
+            r   = r[:self.C,:]
+            phi = phi[:self.C,:]
+        else:
+            z = np.zeros((self.C,r.shape[1],self.nModel),dtype=np.complex64)
+            for i in range(self.nModel):
+                z[:,:,i] = r[i*self.C:(i+1)*self.C,:] * np.exp(1j* phi[i*self.C:(i+1)*self.C,:])
+        self.N = r.shape[1]
+        return z
+    
+    def _doNodeContainerSimHistory(self):
+        """ takes a delays history as input, typicaly the previous stage """
+        nh  = self.zhist.shape[1] # lenght [samples] of history
+        z   = np.zeros((self.C, int(nh+self.tMax/self.dt), self.nModel), dtype=np.complex64)
+        z[:,:nh,:] = self.zhist
+        return z
+        
+    def modelBehavior(self, z):
+        self.z = copy.deepcopy(z)
+        z = self.downsampleModel(z)
+        self.z_abs = WBLSmodel._absZ(z)
+        self.dPLI3d_v(z)
+        self.orders, self.metaL = self.doKuramotoOrder(z)    
+        
+    def meanEnvBuStage(self):
+        # mean envelope over stages
+        self.msEnv = np.zeros((self.C, self.nStates), dtype=np.float32)
+        self.mbEnv = np.zeros((self.C, self.nStates), dtype=np.float32)
+        for i in range(self.nStates):
+            # mean envelope amplitude over stage samples and models
+            self.msEnv[:,i] = np.mean(self.z_abs[:, self.edges_n[i,0]:self.edges_n[i,1],:], axis=(1,2))  
+            self.mbEnv[:,i] = np.mean(self.z_abs[:, int(self.bump_n[i]-0.025*self.fs):int(self.bump_n[i]+0.025*self.fs),:], axis=(1,2))  
+            
+    def runModel(self):
+        self._doCouplingLSBMsimu()
+        self.getDelays(self.velocity)
+        z  = self._doNodeContainers(self.maxDlay)
+        z = WBLSmodel._KMAOcommuParZnV(z,self.maxDlay,self.dlayStep,self.fs,self.dt,self.Ksim,self.iXc, self.locaDyn)
+        self.modelBehavior(z)
+    
+    def runModelNoDel(self):
+        self._doCouplingLSBMsimu()
+        z = WBLSmodel._KMAOcommuParZnV_NoDel(self.zIni,self.fs,self.dt,self.Ksim, self.locaDyn)
+        self.modelBehavior(z)
+   
+    def runModelNoDel_in(self): 
+        """ pulse created in the model instance """
+        z = WBLSmodel._KMAOcommuParZnV_NoDel_in(self.zIni,self.fs,self.dt,self.Ksim, self.locaDyn,  self.impulse, self.edgePulse)
+        self.modelBehavior(z)
+        
+    def runModel_Pulsein(self): 
+        self._doCouplingLSBMsimu()
+        self.impulsePerturb()            # (z,       maxDlay,    dlayStep,       fs,     dt,     kS,      iXc,       localPara,      impulse,    edges_p)
+        edgePulse = self.edgePulse + self.maxDlay
+        z = WBLSmodel._KMAOcommuParZnV_in(self.zIni,self.maxDlay,self.dlayStep, self.fs,self.dt,self.Ksim, self.iXc, self.locaDyn,  self.impulse, edgePulse)
+        self.modelBehavior(z)
+
+    def runModelNoDel_Pulsein(self): 
+        self._doCouplingLSBMsimu()
+        self.impulsePerturb()            # (z,       maxDlay,    dlayStep,       fs,     dt,     kS,      iXc,       localPara,      impulse,    edges_p)
+        z = WBLSmodel._KMAOcommuParZnV_NoDel_in(self.zIni, self.fs,self.dt,self.Ksim, self.locaDyn,  self.impulse, self.edgePulse)
+        self.modelBehavior(z)
+        
+    def runModel_Pulsein_sequence(self): 
+        self._doCouplingLSBMsimu()
+        self.impulsePerturb()            # (z,       maxDlay,    dlayStep,       fs,     dt,     kS,      iXc,       localPara,      impulse,    edges_p)
+        edgePulse = self.edgePulse + self.maxDlay
+        z = WBLSmodel._KMAOcommuParZnV_in_sequen(self.zIni,self.maxDlay,self.dlayStep, self.fs,self.dt,self.Ksim, self.iXc, self.locaDyn,  self.impulse, edgePulse)
+        self.modelBehavior(z)
+
+        
+if __name__ == "__main__":
+    
+    stages = [0,1]
+    stages = 0
+    kao = WBLSmodel(stageIx=stages)
+    # kao.iniCond = 1
+    # kao.iniSeed = 1
+    kao._setHistDelayAndContainers()
+    kao._edgesImpulse()  
+    kao._getEmpiReativEnv()
+    kao.doNoise(variance=0)
+    
+    try:
+        kao.pulse = np.ones((68,len(stages)))*10
+    except:
+        kao.pulse = np.ones(68)*10
+
+    kao.runModel_Pulsein_noise()
+    kao.similarityAllPLIs()
+    kao.relativeEnvBuPre()
+    kao.getScorrelRelEnv()  
+
+        

+ 97 - 0
nodeRelvance4figure.ipynb

@@ -0,0 +1,97 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "%matplotlib qt\n",
+    "import matplotlib.pyplot as pl\n",
+    "import sys\n",
+    "import scipy as sp\n",
+    "from files4brainNetViewer import BNVfiles\n",
+    "import copy"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pathRel = './data/relevanceOfPulses_G0p3L0p7.npz'\n",
+    "with np.load(pathRel) as data:\n",
+    "    sCorTe  = data['sCorTe']\n",
+    "    fitTe   = data['fitTe']\n",
+    "    pulseTe = data['pulseTe']\n",
+    "    fitRef  = data['fitRef']\n",
+    "    sCorRef = data['sCorRef']\n",
+    "    pliRef  = data['pliRef']"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "relevance = np.zeros(fitTe.shape)\n",
+    "match = np.zeros(pliRef.shape[1:])\n",
+    "for i in range(68):\n",
+    "    relevance[:,i,:] = fitRef - fitTe[:,i,:]\n",
+    "for i in range(4):\n",
+    "    match[:,i] = np.mean( np.sign(pliRef[:,:,i]) * kao.eFCs[i,:] ,axis=0)\n",
+    "MDrele  = np.median(relevance,axis=0)\n",
+    "MErele  = np.mean(relevance,axis=0)\n",
+    "SDrele  = np.std(relevance,axis=0)\n",
+    "MDpu    = np.median(pulseTe, axis=0)\n",
+    "MEpu    = np.mean(pulseTe, axis=0)\n",
+    "SDpu    = np.std(pulseTe, axis=0)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Do file to visualize nodes in matlab 3D brain"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "bnv = BNVfiles()\n",
+    "for i in range(4):\n",
+    "    name = 'NODESstage_' + str(i+1)\n",
+    "    bnv.doNodes( colors=MEpu[:,i], sizes=MErele[:,i], name=name)\n",
+    "    name = 'EDGESstage_' + str(i+1)\n",
+    "    bnv.doEdges(edges=kao.eFCs[i,:], weights=match[:,i], name=name)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

+ 116 - 0
optimizationProcesses.py

@@ -0,0 +1,116 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Feb 23 09:45:26 2021
+
+@author: Oscar Portoles
+"""
+
+import numpy as np
+from modelStages import WBLSmodel
+import copy
+
+    
+class Opt_allROIpulse_Wenecor(WBLSmodel):
+    """OPtimization of pulses for several stages sequetially
+        With or without time delays (looks at self.velocity)
+        OPtimizes FC, energy and bump topology"""
+    def __init__(self,stageIx=0,vel=0,zh=None):
+        super().__init__(stageIx=stageIx,vel=vel,zh=zh)
+        
+        self.widthPulse   = 0.040   # [seconds] distance from center of bumps/pulse to state edges, width of pulse
+        self.offMidBump   = 0.01    # offset relative to the middle point of a bump
+        
+        self._setHistDelayAndContainers()
+        self._edgesImpulse()  
+        self._getEmpiReativEnv()
+        
+    def get_name(self):
+        return "Opt. all ROI pulses, constrains, no delays, fixed G,L"
+    
+    def get_bounds(self):
+        """Boundaries on: pulses """
+        self.lowbound   = [-1000]*self.C
+        self.upbound    = [ 1000]*self.C
+        self.maxEner    = np.max([self.upbound[0], np.abs(self.lowbound[0])]) * self.C
+        return (self.lowbound, self.upbound)
+          
+    def fitness(self, x):
+        self.pulse      = np.float64(x)
+        if self.velocity == 0:
+            self.runModelNoDel_Pulsein()
+        else:
+            self.runModel_Pulsein()     
+        # cost function
+        self.relativeEnvBuPre()
+        self.getScorrelRelEnv()           
+        fitByStage = self.similarityPLIsWenergySperm()
+        # for constraints
+        self.meanEnvBuStage()
+        fitSum = np.sum(fitByStage)
+        if np.isnan(fitSum) or ( np.median(self.msEnv) < 0.01 ).any() or self.sCor < 0: # Penalize wrong solutions
+            fitSum = -1
+        # Keep logs
+        self.log.append( [np.float64(x),self.orders, self.metaL,fitByStage, self.pli.astype(np.float16), np.float16(fitSum), np.float16(self.sCor),
+                          self.msEnv.astype(np.float16), self.mbEnv.astype(np.float16), self.relEnv.astype(np.float16)] )
+        return np.array([-1* fitSum])
+
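+# Usage sketch (not part of the original pipeline; the full multi-island setup is in
+# runOptimization.py). The generations and population size below are illustrative
+# values only, not the ones used for the published results.
+#
+#   import pygmo as po
+#   udp_ = Opt_allROIpulse_Wenecor(stageIx=0, vel=0, zh=None)  # first stage, no delays
+#   prob = po.problem(udp_)
+#   algo = po.algorithm(po.de1220(gen=20))
+#   pop  = po.population(prob, size=50)
+#   pop  = algo.evolve(pop)
+#   # fitness() returns -fitSum, so the best fit is -pop.champion_f[0]
+#   best_pulse = pop.champion_x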
+          
+class FindAndKeepBestSolution(WBLSmodel):
+    """ Extracts the logs form an optimization, get parameters from best solution, 
+        run model and keep last samples. Used for squence of stages"""
+        
+    def extractBestPulse(self, logs):
+        maxFit = 0
+        for logIsla in logs:
+            fit = np.array([log_[3] for log_ in logIsla]).astype(np.float32)
+            if np.max(fit) > maxFit:
+                maxFit = np.max(fit)
+                iXmax = np.argmax(fit)
+                self.bestPulse = np.array([log_[0] for log_ in logIsla])[iXmax,:].astype(np.float64)
+                
+    def getBestWeightedModel(self, logs, per):
+        """ @ logs: list, logs from UDP
+            @ per: float, percentile of best fitness to look for best solution"""
+        fit, corr, ener, pulse = [], [], [], []
+        for logIsla in logs:
+            fit.append(  np.array([log_[5] for log_ in logIsla]).ravel() )
+            ener.append( np.array([np.sum(np.abs(log_[0])) for log_ in logIsla] ).ravel() )
+            corr.append( np.array([log_[6] for log_ in logIsla]).ravel() )
+            pulse.append(np.array([log_[0] for log_ in logIsla] ) )
+            
+        fit     = np.hstack(fit)
+        ener    = np.hstack(ener)
+        corr    = np.hstack(corr)
+        pulse   = np.vstack(pulse) 
+        # top-fitness percentile threshold
+        perTh   = np.percentile(fit, per)
+        mask    = fit >= perTh
+        fit, corr, ener, pulse = fit[mask], np.float32(corr[mask]), np.float32(ener[mask]), pulse[mask,:]
+        # best correlation-energy trade-off
+        iXbest = np.argmin( ener*(1-corr))
+        self.bestPulse  = pulse[iXbest, :].astype(np.float64)
+        self.bestEnergy, self.bestFit, self.bestCorr = ener[iXbest], fit[iXbest], corr[iXbest]
+        
+    def runBestModel(self):
+        self._setHistDelayAndContainers()
+        self._edgesImpulse() 
+        self._getEmpiReativEnv()
+        self.pulse      = copy.deepcopy(self.bestPulse)
+        self.maxEner    = 1000.0 * self.C
+        if self.velocity == 0:
+            self.runModelNoDel_Pulsein()
+        else:
+            self.runModel_Pulsein()
+        # cost function
+        self.relativeEnvBuPre()
+        self.getScorrelRelEnv()           
+        fitByStage = self.similarityPLIsWenergySperm()
+        # for constraints
+        self.meanEnvBuStage()
+        fitSum = np.sum(fitByStage)
+        # Keep logs
+        self.log.append( [self.pulse,self.orders, self.metaL,fitByStage, self.pli.astype(np.float16), np.float16(fitSum), np.float16(self.sCor),
+                          self.msEnv.astype(np.float16), self.mbEnv.astype(np.float16), self.relEnv.astype(np.float16)] )
+        return self.z[:, -(self.maxDlay+1):, :]
+                    
+        
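+# Usage sketch (mirrors runOptimization.py): `logsUDP` is assumed to be the list of
+# per-island logs collected after an evolution, and 99.5 an example percentile.
+#
+#   udpH = FindAndKeepBestSolution(stageIx=0, vel=0, zh=None)
+#   udpH.getBestWeightedModel(logsUDP, 99.5)
+#   zHist = udpH.runBestModel()  # last samples, used as initial state of the next stage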

+ 691 - 0
reproduce_Figs.ipynb

@@ -0,0 +1,691 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import scipy as sp\n",
+    "import scipy.stats as sps\n",
+    "from scipy.io import loadmat\n",
+    "import matplotlib.pyplot as pl\n",
+    "from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n",
+    "%matplotlib tk\n",
+    "from matplotlib import cm\n",
+    "from matplotlib.gridspec import GridSpec\n",
+    "from matplotlib import colors\n",
+    "import matplotlib \n",
+    "import copy\n",
+    "from itertools import combinations"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def getLobes():\n",
+    "    rois = loadmat('./data/dataMEGasoMeM/ROIsLoc.mat')\n",
+    "    lobes = rois['lobes']\n",
+    "    Blobes = lobes[:,0].tolist()\n",
+    "    bigLobes = []\n",
+    "    for lob in Blobes:\n",
+    "        bigLobes.append(lob[0])\n",
+    "        bigLobes.append(lob[0])\n",
+    "    bigLobes = np.array(bigLobes)\n",
+    "    \n",
+    "    Slobes = lobes[:,1].tolist()\n",
+    "    smallLobes = []\n",
+    "    for lob in Slobes:\n",
+    "        smallLobes.append(lob[0])\n",
+    "        smallLobes.append(lob[0])\n",
+    "    smallLobes = np.array(smallLobes)\n",
+    "    return bigLobes, smallLobes\n",
+    "\n",
+    "\n",
+    "def reOrderROis(x):\n",
+    "    \" x: [T, ROI, Stages]\"\n",
+    "    bigLobes, smallLobes = getLobes()\n",
+    "        # indexes to reorder matrix entris\n",
+    "    frontalOrigen = np.array([17, 20,47,50], dtype=np.int32) \n",
+    "    frontalTo     = np.array([21, 22,45,46], dtype=np.int32)\n",
+    "    parietOrigen  = np.array([18, 19, 21, 22,45,46,48,49], dtype=np.int32)\n",
+    "    parietTo      = np.array([17, 18, 19, 20,47,48,49,50], dtype=np.int32)\n",
+    "    bothOrigen    = np.hstack((frontalOrigen, parietOrigen))\n",
+    "    bothTo        = np.hstack((frontalTo, parietTo))\n",
+    "    # reorder matrix\n",
+    "    x_o = copy.deepcopy(x)\n",
+    "    x_o[:,bothTo,:] = x_o[:, bothOrigen,:]\n",
+    "    # indexes reorder lobes\n",
+    "    origenLobes = np.hstack((2*frontalOrigen, 2*frontalOrigen+1, 2*parietOrigen, 2*parietOrigen+1))\n",
+    "    toLobes = np.hstack((2*frontalTo, 2*frontalTo+1, 2*parietTo, 2*parietTo+1))\n",
+    "    # reorder lobes\n",
+    "    bigLobes = copy.deepcopy(bigLobes)\n",
+    "    bigLobes[toLobes] = bigLobes[origenLobes]\n",
+    "    smallLobes = copy.deepcopy(smallLobes)\n",
+    "    smallLobes[toLobes] = smallLobes[origenLobes]\n",
+    "    return x_o, bigLobes, smallLobes\n",
+    "\n",
+    "def colorBlobesYaxes(axco,bigLobes, lticks=10,wticks=5,bothAxis=False):\n",
+    "    nroi = 68\n",
+    "    axco.set_yticklabels(['']*nroi)\n",
+    "    axco.set_yticks(np.arange(0, nroi))\n",
+    "    axco.yaxis.set_tick_params(width=wticks)\n",
+    "    if bothAxis:\n",
+    "        axco.yaxis.set_ticks_position('both')\n",
+    "    else:\n",
+    "        axco.yaxis.set_ticks_position('left')\n",
+    "    axco.tick_params('both', length=lticks, labelright=False, labeltop=False, direction='out')\n",
+    "    for lobe, line in zip(bigLobes, axco.yaxis.get_ticklines()):\n",
+    "        if lobe == 'Temp':\n",
+    "            line.set_color('C0')\n",
+    "            lineT = line\n",
+    "        elif lobe == 'Occi':\n",
+    "            line.set_color('C1')\n",
+    "            lineO = line\n",
+    "        elif lobe == 'Front':\n",
+    "            line.set_color('C2')\n",
+    "            lineF = line\n",
+    "        elif lobe == 'Pari':\n",
+    "            line.set_color('C3')\n",
+    "            lineP = line\n",
+    "    return axco           "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "wiFC = [80,20,150] # Time windos time-locked to onset of stage\n",
+    "wiFCin = [80,20,120]  # Time windos time-locked to end of stage \n",
+    "colorStg = ['C0','C1','C2','C3','C5']\n",
+    "colorStg = ['C0','orange','green','red','purple']\n",
+    "nameSt = ['pre-encoding','encoding','retrieval','decision','motor']"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "file = './data/durationStages.mat'\n",
+    "data = loadmat(file)    \n",
+    "durStg = data['stagesR']"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "file   = './data/PLI8ENVbumperp.mat'\n",
+    "data   = loadmat(file)\n",
+    "pliW   = data['pliW']\n",
+    "pliWsd = data['pliWsd']\n",
+    "pli01  = data['fcTh']\n",
+    "envZS  = data['envZSW']\n",
+    "envZSo,bLobes, sLobes = reOrderROis(envZS)\n",
+    "\n",
+    "file   = './data/FCsigmeanBySj_res.mat'\n",
+    "data   = loadmat(file)\n",
+    "m_pliWSj   = data['mSigFCw']\n",
+    "m_NOpliWSj   = data['mNoSigFCw']\n",
+    "m_pliW     = [None]*5\n",
+    "se_pliW = [None]*5\n",
+    "m_NOpliW     = [None]*5\n",
+    "se_NOpliW = [None]*5\n",
+    "for i in range(5):\n",
+    "    m_pliW[i]    =  np.mean(m_pliWSj[i][0], axis=2) # mean acrss subjects\n",
+    "    se_pliW[i]   =  np.std(m_pliWSj[i][0], axis=2) / np.sqrt(18)\n",
+    "    m_NOpliW[i]  =  np.mean(m_NOpliWSj[i][0], axis=2) # mean acrss subjects\n",
+    "    se_NOpliW[i] =  np.std(m_NOpliWSj[i][0], axis=2) / np.sqrt(18)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "imPath = '.data/imMEG/'\n",
+    "orint  = ['axi','sagi','sagi2'];\n",
+    "nSt = 4\n",
+    "nV  = 3\n",
+    "\n",
+    "imFC  = [[None]*nV for _ in range(nSt)]\n",
+    "for iSt in range(nSt):\n",
+    "    for iV in range(nV):\n",
+    "        filename = imPath + 'BN_' + orint[iV] + '_stg_' + str(iSt+1) + '.png'\n",
+    "        if iV == 0:\n",
+    "            im_ = pl.imread(filename)[350:-420,340:-290,:]\n",
+    "            imFC[iSt][iV] = copy.deepcopy(im_)\n",
+    "        else:\n",
+    "            im_ = pl.imread(filename)[60:-100,100:-50,:]\n",
+    "            imFC[iSt][iV] = copy.deepcopy(im_)\n",
+    "        if iV > 0 and iSt == 3:\n",
+    "            im_ = pl.imread(filename)[270:-330,370:-350,:]\n",
+    "            imFC[iSt][iV] = copy.deepcopy(im_) "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Figure 1"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## one subplot per brain  ##\n",
+    "simOn = np.int32(np.cumsum(np.round(np.mean(durStg,axis=0)/10)[1:])) # samples at 1/100\n",
+    "durM  = np.int32(np.round(np.mean(durStg,axis=0)/10))\n",
+    "simOn = np.insert(simOn, 0, 0) + 9\n",
+    "onM    = np.insert(np.cumsum(np.mean(durStg,axis=0)),0,0) # samples at 1/100\n",
+    "duraM  = np.mean(durStg,axis=0) # samples at 1/100\n",
+    "# axes and subplots\n",
+    "leftOff, rightOff = 0.05, 0.95\n",
+    "topOff, bottOff = 0.91, 0.09\n",
+    "gridHspace = 0.1\n",
+    "gridWspace = 0.2\n",
+    "totWidth = rightOff-leftOff\n",
+    "totHeight = topOff-bottOff\n",
+    "fig = pl.figure()\n",
+    "fig.set_figheight(4.5)\n",
+    "fig.set_figwidth(7)\n",
+    "fig.set_dpi(900)\n",
+    "fontS = 6\n",
+    "lTicks = 2\n",
+    "# matplotlib.rcParams.update({'font.size': fontS,'font.stretch' : 'ultra-condensed' })\n",
+    "matplotlib.rcParams['font.stretch'] = 'ultra-condensed'\n",
+    "matplotlib.rcParams['font.size'] = fontS\n",
+    "matplotlib.rcParams['lines.linewidth'] = 0.5\n",
+    "matplotlib.rcParams['patch.linewidth'] = 0.5\n",
+    "matplotlib.rcParams['axes.linewidth'] = 0.25 \n",
+    "# matplotlib.rcParams['axes.labelpad'] = 1\n",
+    "matplotlib.rcParams['xtick.labelsize'] = fontS\n",
+    "matplotlib.rcParams['ytick.labelsize'] = fontS\n",
+    "matplotlib.rcParams['xtick.direction'] = 'inout'\n",
+    "matplotlib.rcParams['ytick.direction'] = 'inout'\n",
+    "matplotlib.rcParams['xtick.major.width'] = 0.25\n",
+    "matplotlib.rcParams['ytick.major.width'] = 0.25\n",
+    "matplotlib.rcParams['xtick.major.size'] = lTicks\n",
+    "matplotlib.rcParams['ytick.major.size'] = lTicks\n",
+    "matplotlib.rcParams['xtick.major.pad'] = 1\n",
+    "matplotlib.rcParams['ytick.major.pad'] = 1\n",
+    "\n",
+    "gridHratios = [0.04,0.23,0.23, 0.12,0.21,0.09]\n",
+    "gs  = GridSpec(6,20,height_ratios=gridHratios,wspace=gridWspace,hspace=gridHspace,\n",
+    "              left=leftOff, right=rightOff, top=topOff, bottom=bottOff) # 2 rows, 3 columns\n",
+    "axBr = np.empty((2,4),dtype=object)\n",
+    "axS  = np.empty((3,4),dtype=object)\n",
+    "axBl = []\n",
+    "axCB = []\n",
+    "axCBon = np.arange(totWidth/8,totWidth+0.01-totWidth/8,totWidth/4)\n",
+    "\n",
+    "for iX, i in enumerate(range(0,20,5)):\n",
+    "    axBl.append(fig.add_subplot(gs[1:3,i:i+2]))\n",
+    "    axBr[0,iX] = fig.add_subplot(gs[1,i+2:i+5])\n",
+    "    axBr[1,iX] = fig.add_subplot(gs[2,i+2:i+5])\n",
+    "    axCB.append(fig.add_axes([leftOff+axCBon[iX]-0.1,0.53,0.07,0.01]))\n",
+    "    for jX, j in enumerate(range(3,6)):\n",
+    "        axS[jX, iX] = fig.add_subplot(gs[j,i:i+5])\n",
+    "ax0  = fig.add_subplot(gs[0,:])\n",
+    "\n",
+    "# stages overview\n",
+    "for i in range(5):\n",
+    "    ax0.barh(y = 0,width=duraM[i], left=onM[i], height=0.5, color=colorStg[i])#,\n",
+    "    ax0.text(onM[i] + duraM[i]/2, y=0, s=nameSt[i], ha='center', va='center',color='w')\n",
+    "ax0.set_xlim([0,onM[-1]])\n",
+    "ax0.xaxis.tick_top()\n",
+    "ax0.spines['right'].set_visible(False)\n",
+    "ax0.spines['bottom'].set_visible(False)\n",
+    "ax0.spines['left'].set_visible(False)\n",
+    "ax0.set_yticks([])\n",
+    "ax0.set_xlabel('Time [ms]')\n",
+    "ax0.xaxis.set_label_position('top') \n",
+    "ax0.text(0., -0.5,'Onset Stimuli',ha='center',va='center')\n",
+    "ax0.text(onM[-1], -0.5,'Response',ha='center',va='center')\n",
+    "\n",
+    "# # FC brain\n",
+    "for iSt in range(nSt):\n",
+    "    axBl[iSt].imshow(imFC[iSt][0])\n",
+    "    axBl[iSt].axis('off')\n",
+    "    # colorbar\n",
+    "    cmCol_ = cm.ScalarMappable( cmap = pl.get_cmap('cool'))\n",
+    "    cmCol_.set_array([])\n",
+    "    cm_ = fig.colorbar(cmCol_, cax=axCB[iSt], ticks=[0, 1],orientation='horizontal')\n",
+    "    minSiso_ = MEGsiso[:,iSt].min()\n",
+    "    maxSiso_ = MEGsiso[:,iSt].max()\n",
+    "    middle_  = np.abs(minSiso_) / (maxSiso_ - minSiso_)\n",
+    "    cm_.set_ticks([0,middle_,1])\n",
+    "    cm_.set_ticklabels([str(minSiso_),'0', str(maxSiso_)]) # reverted because data was for BrainNet\n",
+    "    #\n",
+    "    for iV in range(2):\n",
+    "        axBr[iV, iSt].imshow(imFC[iSt][iV+1])\n",
+    "        axBr[iV, iSt].axis('off')\n",
+    "fig.text(leftOff+0.01,0.55, \"in - out deg.\", fontsize=fontS)\n",
+    "\n",
+    "\n",
+    "# FC over time\n",
+    "minmax = [0.038,0.053]\n",
+    "for i in range(4):\n",
+    "    minVal = 10\n",
+    "    maxVal = -10\n",
+    "    axS[0,i].plot(m_NOpliW[i][wiFC[0]:wiFC[-1],i],c='gray')#,ls='--',linewidth=2)\n",
+    "    ste_up   = m_NOpliW[i][wiFC[0]:wiFC[-1],i] + se_NOpliW[i][wiFC[0]:wiFC[-1],i]\n",
+    "    ste_down = m_NOpliW[i][wiFC[0]:wiFC[-1],i] - se_NOpliW[i][wiFC[0]:wiFC[-1],i]\n",
+    "    axS[0,i].fill_between(np.arange(ste_up.shape[0]), ste_up, ste_down, color='gray',alpha=0.5)\n",
+    "    minVal = np.min([minVal, ste_down.min()])\n",
+    "    maxVal = np.max([maxVal, ste_up.max()])\n",
+    "     \n",
+    "    axS[0,i].plot(m_pliW[i][wiFC[0]:wiFC[-1],i], c=colorStg[i+1])#,linewidth=2)\n",
+    "    ste_up   = m_pliW[i][wiFC[0]:wiFC[-1],i] + se_pliW[i][wiFC[0]:wiFC[-1],i]\n",
+    "    ste_down = m_pliW[i][wiFC[0]:wiFC[-1],i] - se_pliW[i][wiFC[0]:wiFC[-1],i]\n",
+    "    axS[0,i].fill_between(np.arange(ste_up.shape[0]), ste_up, ste_down, color=colorStg[i+1],alpha=0.3) \n",
+    "    minVal = np.min([minVal, ste_down.min()])\n",
+    "    maxVal = np.max([maxVal, ste_up.max()])\n",
+    "              \n",
+    "    axS[0,i].axvspan(0, wiFC[1], facecolor='silver', alpha=0.7)\n",
+    "    axS[0,i].axvspan(wiFC[1]+durM[i+1],wiFC[-1]-wiFC[0] , facecolor='silver', alpha=0.7)            \n",
+    "    axS[0,i].axvline(wiFC[1]-3, ymin=0, ymax=1, c='k')\n",
+    "    axS[0,i].set_xlim([0,(wiFC[2]-wiFC[0])])\n",
+    "    axS[0,i].axes.xaxis.set_ticklabels([])\n",
+    "    axS[0,i].set_xticks(np.arange(2,(wiFC[2]-wiFC[0]), 15))    \n",
+    "    axS[0,i].xaxis.set_ticks_position('both')\n",
+    "    axS[0,i].set_yticks([minVal, maxVal])\n",
+    "    axS[0,i].axes.yaxis.set_ticklabels([np.round(minVal,3), np.round(maxVal,3)], ha='left',ma='left')\n",
+    "    axS[0,i].tick_params(axis=\"y\",direction=\"inout\", pad=-3)\n",
+    "    axS[0,i].yaxis.get_major_ticks()[0].label1.set_verticalalignment('bottom') \n",
+    "    axS[0,i].yaxis.get_major_ticks()[-1].label1.set_verticalalignment('top')      \n",
+    "axS[0,0].set_ylabel('across-trials\\nmean dpFC',fontsize=fontS)\n",
+    "\n",
+    "# # INSET RESPONSE\n",
+    "axins = axS[0, 3].inset_axes([0.55, 0.85, 0.44, 0.44])\n",
+    "wiFCin = [100-(durM[-1]),(durM[-1]) ,105] \n",
+    "\n",
+    "# significant\n",
+    "axins.plot(m_pliW[-1][wiFCin[0]:wiFCin[-1],-1], c=colorStg[4])#,linewidth=2)\n",
+    "ste_up   = m_pliW[-1][wiFCin[0]:wiFCin[-1],-1] + se_pliW[-1][wiFCin[0]:wiFCin[-1],-1]\n",
+    "ste_down = m_pliW[-1][wiFCin[0]:wiFCin[-1],-1] - se_pliW[-1][wiFCin[0]:wiFCin[-1],-1]\n",
+    "axins.fill_between(np.arange(ste_up.shape[0]), ste_up, ste_down, color=colorStg[4],alpha=0.3) \n",
+    "# NOT significant\n",
+    "axins.plot(m_NOpliW[-1][wiFCin[0]:wiFCin[-1],-1],c='gray')#,ls='--',linewidth=2)\n",
+    "ste_up   = m_NOpliW[-1][wiFCin[0]:wiFCin[-1],-1] + se_NOpliW[-1][wiFCin[0]:wiFCin[-1],-1]\n",
+    "ste_down = m_NOpliW[-1][wiFCin[0]:wiFCin[-1],-1] - se_NOpliW[-1][wiFCin[0]:wiFCin[-1],-1]\n",
+    "axins.fill_between(np.arange(ste_up.shape[0]), ste_up, ste_down, color='gray',alpha=0.5)\n",
+    "   \n",
+    "axins.set_xlim([0,m_pliW[-1][wiFCin[0]:wiFCin[-1],-1].shape[0]]) \n",
+    "axins.axvline(wiFCin[1], ymin=0, ymax=1, c='k')\n",
+    "axins.axvspan(0, 5, facecolor='silver', alpha=0.7)\n",
+    "axins.axvspan(durM[-1],wiFCin[-1]-wiFCin[0] , facecolor='silver', alpha=0.7)  \n",
+    "axins.set_yticklabels([])\n",
+    "axins.set_xticklabels([])\n",
+    "axins.set_xticks([])\n",
+    "axins.set_yticks([])\n",
+    "\n",
+    "iSt = 2\n",
+    "# # INSET RETRIEVAL\n",
+    "axins2 = axS[0, 1].inset_axes([0.55, 0.85, 0.44, 0.44])\n",
+    "wiFCin = [100-(durM[iSt]),(durM[iSt]) ,105] \n",
+    "\n",
+    "# significant\n",
+    "axins2.plot(m_pliW[iSt][wiFCin[0]:wiFCin[-1],iSt+1], c=colorStg[iSt])#,linewidth=2)\n",
+    "ste_up   = m_pliW[iSt][wiFCin[0]:wiFCin[-1],iSt+1] + se_pliW[iSt][wiFCin[0]:wiFCin[-1],iSt+1]\n",
+    "ste_down = m_pliW[iSt][wiFCin[0]:wiFCin[-1],iSt+1] - se_pliW[iSt][wiFCin[0]:wiFCin[-1],iSt+1]\n",
+    "axins2.fill_between(np.arange(ste_up.shape[0]), ste_up, ste_down, color=colorStg[iSt],alpha=0.3) \n",
+    "# NOT significant\n",
+    "axins2.plot(m_NOpliW[iSt][wiFCin[0]:wiFCin[-1],iSt+1],c='gray')#,ls='--',linewidth=2)\n",
+    "ste_up   = m_NOpliW[iSt][wiFCin[0]:wiFCin[-1],iSt+1] + se_NOpliW[iSt][wiFCin[0]:wiFCin[-1],iSt+1]\n",
+    "ste_down = m_NOpliW[iSt][wiFCin[0]:wiFCin[-1],iSt+1] - se_NOpliW[iSt][wiFCin[0]:wiFCin[-1],iSt+1]\n",
+    "axins2.fill_between(np.arange(ste_up.shape[0]), ste_up, ste_down, color='gray',alpha=0.5)\n",
+    "   \n",
+    "axins2.set_xlim([0,m_pliW[iSt][wiFCin[0]:wiFCin[-1],iSt].shape[0]]) \n",
+    "axins2.axvline(wiFCin[1], ymin=0, ymax=1, c='k')\n",
+    "axins2.axvspan(0, 5, facecolor='silver', alpha=0.7)\n",
+    "axins2.axvspan(durM[iSt],wiFCin[-1]-wiFCin[0] , facecolor='silver', alpha=0.7)  \n",
+    "axins2.set_yticklabels([])\n",
+    "axins2.set_xticklabels([])\n",
+    "axins2.set_xticks([])\n",
+    "axins2.set_yticks([])\n",
+    "\n",
+    "# z-score envelope ERP\n",
+    "for i in range(4):\n",
+    "    axS[1,i].matshow(envZS[wiFC[0]-0:wiFC[-1]-0,:,i].T, aspect='auto')#,cmap='plasma') # independent scale for each stage \n",
+    "    axS[1,i].axvline(wiFC[1]-3, ymin=0, ymax=1, c='k')\n",
+    "    axS[1,i].set_xlim([0,(wiFC[2]-wiFC[0])])\n",
+    "    axS[1,i].set_xticks(np.arange(2,(wiFC[2]-wiFC[0]),15))    \n",
+    "    axS[1,i].axes.xaxis.set_ticklabels([])\n",
+    "    axS[1,i].axvline(wiFC[1]+2.6, ymin=0, ymax=1, c='m',linestyle='-')# c='C7',linestyle='--')\n",
+    "    axS[1,i].axvline(wiFC[1]-2.5, ymin=0, ymax=1, c='m',linestyle='-')# c='C7',linestyle='--')\n",
+    "    axS[1,i].axvline(wiFC[1]-3.5, ymin=0, ymax=1, c='m',linestyle='-')# c='C7',linestyle='--')\n",
+    "    axS[1,i].axvline(wiFC[1]-8.8, ymin=0, ymax=1, c='m',linestyle='-')# c='C7',linestyle='--')\n",
+    "    if i == 3:\n",
+    "        colorBlobesYaxes(axS[1,i],bLobes, lticks=4,wticks=1.2, bothAxis=True)\n",
+    "    else:\n",
+    "        colorBlobesYaxes(axS[1,i],bLobes, lticks=4,wticks=1.2)\n",
+    "    axS[1,i].axes.tick_params(axis='x',direction='inout', length=lTicks)\n",
+    "axS[1,0].set_ylabel('Cortical ROIs \\nLeft|Right H.', labelpad=0.1,fontsize=fontS)\n",
+    "    \n",
+    "# durations\n",
+    "minmax = [[0.0,0.02],[0.0,0.005],[0.0,0.02],[0.0,0.005]]\n",
+    "for i in range(4):\n",
+    "    axS[2,i].hist(durStg[:,i+1]+200,bins=100,density=True,histtype='step',color=colorStg[i+1]);#, range=(0,1500))\n",
+    "    axS[2,i].set_ylim(minmax[i])\n",
+    "    dens, bins = np.histogram(durStg[:,i+1]+wiFC[1]*10,bins=100,density=True);#, range=(0,1500))\n",
+    "    axS[2,i].axvline(wiFC[1]*10-30, ymin=0, ymax=1, c='k')\n",
+    "    axS[2,i].axvline(bins[0], ymin=0, ymax=1,c='w',lw=1)\n",
+    "    axS[2,i].set_xlim([0, (wiFC[2]-wiFC[0])*10])\n",
+    "    axS[2,i].set_xticks(np.arange(20,(wiFC[2]-wiFC[0])*10,150))    \n",
+    "    axS[2,i].set_xticklabels([str(j) for j in range(-150,(wiFC[2]-np.sum(wiFC[:2]))*10,150)])\n",
+    "    axS[2,i].xaxis.set_ticks_position('both')\n",
+    "    if np.mod(i,2): #0:even, 1:odd\n",
+    "        axS[2,i].tick_params(axis=\"y\",direction=\"inout\", pad=-24)\n",
+    "    else:\n",
+    "        axS[2,i].tick_params(axis=\"y\",direction=\"inout\", pad=-16)\n",
+    "    axS[2,i].set_xlabel('Stage Durations [ms]')\n",
+    "    axS[2,0].set_ylabel(\"Probability \\n Density\",fontsize=fontS)\n",
+    "    axS[2,i].set_yticks(np.array([minmax[i][0],np.mean(minmax[i]),minmax[i][1]]))\n",
+    "    axS[2,i].yaxis.get_major_ticks()[0].label1.set_verticalalignment('bottom') \n",
+    "    axS[2,i].yaxis.get_major_ticks()[-1].label1.set_verticalalignment('top')  \n",
+    "    \n",
+    "# guidig lines top\n",
+    "axL = fig.add_axes([leftOff,0.5,0.9,0.38])\n",
+    "axL.set_ylim([-1.5,1])\n",
+    "Ton = np.cumsum(duraM)*1.05-5\n",
+    "axL.set_xlim([-5,Ton[-1]+5])\n",
+    "tB = np.arange(0,Ton[-1]+0.01,Ton[-1]/4)-5\n",
+    "tB[0] = tB[0]-1\n",
+    "tB[2] = tB[2]+4\n",
+    "tB[3] = tB[3]+8\n",
+    "tB[-1] = tB[-1]+11\n",
+    "\n",
+    "offLSt = 1.5\n",
+    "\n",
+    "for i in range(4):\n",
+    "    axL.plot(np.array([tB[i],tB[i],Ton[i]])+offLSt,[-1.5,0.5,1],c=colorStg[i+1],zorder=100)\n",
+    "    axL.plot(np.array([Ton[i+1],tB[i+1],tB[i+1]])-offLSt,[1,0.5,-1.5],c=colorStg[i+1],zorder=10)\n",
+    "axL.axis('off')\n",
+    "\n",
+    "# idetifiers\n",
+    "yLoc = (np.cumsum(np.flipud(gridHratios))) * (1-bottOff) + bottOff/2\n",
+    "ids  = ['(E)','(D)','(C)','(B)','(A)']\n",
+    "for iX, i in enumerate([0,1,2,3,5]):\n",
+    "    fig.text(0.96, yLoc[i],ids[iX],ha='left',va='center',fontweight='bold')\n",
+    "\n",
+    "# fname  = './dataSave/Figure1B_insRetResMean.png'  \n",
+    "# pl.savefig(fname, dpi=None, facecolor='w', edgecolor=None,\n",
+    "#         orientation='portrait', papertype=None, format='png',\n",
+    "#         transparent=False, bbox_inches=None, pad_inches=0.,\n",
+    "#         frameon=None)\n",
+    "# pl.close('all')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For Fig 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "imPath = '.data/imSIM/\n",
+    "orint  = ['axi','sagi','sagi2'];\n",
+    "nSt = 4\n",
+    "nV  = 3\n",
+    "\n",
+    "imFCs  = [[None]*nV for _ in range(nSt)]\n",
+    "for iSt in range(nSt):\n",
+    "    for iV in range(nV):\n",
+    "        filename = imPath + 'BN_' + orint[iV] + '_stg_' + str(iSt+1) + '.png'\n",
+    "        if iV == 0:\n",
+    "            im_ = pl.imread(filename)[100:-100,180:-180,:]\n",
+    "            imFCs[iSt][iV] = copy.deepcopy(im_)\n",
+    "        else:\n",
+    "            im_ = pl.imread(filename)[60:-100,100:-50,:]\n",
+    "            imFCs[iSt][iV] = copy.deepcopy(im_)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "file = '.data/metaFC1000afterPulse_G0p3L0p7.npz'\n",
+    "with np.load(file) as data:\n",
+    "    siMglo = data['metaG']\n",
+    "    siMloc = data['metaL']\n",
+    "    si2fc  = data['sim2eFC']\n",
+    "\n",
+    "siMloc\n",
+    "fs = 100\n",
+    "t  = np.linspace(1/fs,siMglo.shape[1]/fs,siMglo.shape[1])\n",
+    "tR = np.arange(1/fs,1.5+1/fs,1/fs) # trial length\n",
+    "nR = tR.shape[0]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Random fit"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "eFC    = np.float32(loadmat('./data/FCstagesDTIrois.mat')['fc1d'])\n",
+    "eFCs   = np.sign(eFC[2:,:])\n",
+    "nSt    = eFCs.shape[0] \n",
+    "pairs = np.array(list(combinations(range(68),2)), dtype=np.int32)\n",
+    "\n",
+    "def fit2eFCs(ref, test):\n",
+    "    return np.sum(ref * test, axis=1) / np.sum(np.abs(ref), axis=1)\n",
+    "\n",
+    "def doFC(argZ):\n",
+    "    \"@ fc: [nSt, roi]\"\n",
+    "    arg = np.exp(1j*argZ)\n",
+    "    return np.sign(np.imag(arg[:,pairs[:,0]] * np.conjugate(arg[:,pairs[:,1]])))\n",
+    "\n",
+    "savedTo = './data/relevanceOfPulses_G0p3L0p7.npz'\n",
+    "with np.load(savedTo)  as data:\n",
+    "    fitRef   = data['fitRef']\n",
+    "    \n",
+    "N = 20000\n",
+    "raFit = np.zeros((nSt,N))\n",
+    "for i in range(N):   \n",
+    "    testFC = doFC(np.random.uniform(-np.pi,np.pi,(nSt, 68))) \n",
+    "    raFit[:,i] = fit2eFCs(eFCs, testFC)"
+   ]
+  },
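+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For reference, a restatement of the cell above (no extra analysis step): with $\\mathrm{ref}=\\mathrm{sign}(eFC)$ over the 2278 ROI pairs and a signed test pattern, `fit2eFCs` computes, per stage $s$,\n",
+    "$$\\mathrm{fit}_s = \\frac{\\sum_k \\mathrm{ref}_{s,k}\\,\\mathrm{test}_{s,k}}{\\sum_k |\\mathrm{ref}_{s,k}|},$$\n",
+    "i.e. (matching - mismatching edge signs) divided by the number of non-zero empirical edges. `doFC` draws uniform phases $\\theta$ and builds a random pattern as $\\mathrm{sign}(\\sin(\\theta_i-\\theta_j))$, so `raFit` gives the chance-level distribution against which the model fits are compared."
+   ]
+  },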
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Fig 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## one subplot per brain  ##\n",
+    "simOn = np.int32(np.cumsum(np.round(np.mean(durStg,axis=0)/10)[1:])) # samples at 1/100\n",
+    "durM  = np.int32(np.round(np.mean(durStg,axis=0)/10))\n",
+    "simOn = np.insert(simOn, 0, 0) + 12\n",
+    "\n",
+    "fig = pl.figure()\n",
+    "\n",
+    "gridHratios = [0.15,0.35,0.35,0.15]\n",
+    "leftOff, rightOff = 0.05, 0.95\n",
+    "topOff, bottOff = 0.91, 0.09\n",
+    "gridHspace = 0.01 #0.01\n",
+    "gridWspace = 0.05 #0.02\n",
+    "totWidth = rightOff-leftOff\n",
+    "totHeight = topOff-bottOff\n",
+    "fig = pl.figure()\n",
+    "fig.set_figheight(3.)\n",
+    "fig.set_figwidth(7)\n",
+    "fig.set_dpi(900)\n",
+    "fontS = 6\n",
+    "lTicks = 2\n",
+    "# matplotlib.rcParams.update({'font.size': fontS,'font.stretch' : 'ultra-condensed' })\n",
+    "matplotlib.rcParams['font.stretch'] = 'ultra-condensed'\n",
+    "matplotlib.rcParams['font.size'] = fontS\n",
+    "matplotlib.rcParams['lines.linewidth'] = 0.5\n",
+    "matplotlib.rcParams['patch.linewidth'] = 15\n",
+    "matplotlib.rcParams['axes.linewidth'] = 0.25 \n",
+    "# matplotlib.rcParams['axes.labelpad'] = 1\n",
+    "matplotlib.rcParams['xtick.labelsize'] = fontS\n",
+    "matplotlib.rcParams['ytick.labelsize'] = fontS\n",
+    "matplotlib.rcParams['xtick.direction'] = 'inout'\n",
+    "matplotlib.rcParams['ytick.direction'] = 'inout'\n",
+    "matplotlib.rcParams['xtick.major.width'] = 0.25\n",
+    "matplotlib.rcParams['ytick.major.width'] = 0.25\n",
+    "matplotlib.rcParams['xtick.major.size'] = lTicks\n",
+    "matplotlib.rcParams['ytick.major.size'] = lTicks\n",
+    "matplotlib.rcParams['xtick.major.pad'] = 1\n",
+    "matplotlib.rcParams['ytick.major.pad'] = 1\n",
+    "\n",
+    "gs  = GridSpec(4,8,height_ratios=gridHratios,wspace=gridWspace,hspace=gridHspace,\n",
+    "              left=leftOff, right=rightOff, top=topOff, bottom=bottOff) # 2 rows, 3 columns\n",
+    "axs0l,axs1,axs2 = [],[],[]\n",
+    "axs0r = np.empty((2,4),dtype=object)\n",
+    "for iX, i in enumerate(range(0,8,2)):\n",
+    "    axs0l.append(fig.add_subplot(gs[1:3,i])) # First row, first column\n",
+    "    axs0r[0,iX] = fig.add_subplot(gs[1,i+1])\n",
+    "    axs0r[1,iX] = fig.add_subplot(gs[2,i+1])\n",
+    "    axs1.append(fig.add_subplot(gs[3,i:i+2])) # S\n",
+    "    axs2.append(fig.add_subplot(gs[0,i:i+2])) # RAndom\n",
+    "\n",
+    "# # FC brain\n",
+    "for iSt in range(nSt):\n",
+    "    axs0l[iSt].imshow(imFCs[iSt][0])\n",
+    "    axs0l[iSt].axis('off')\n",
+    "    for iV in range(2):\n",
+    "        axs0r[iV, iSt].imshow(imFCs[iSt][iV+1])\n",
+    "        axs0r[iV, iSt].axis('off')\n",
+    "        \n",
+    "# Random distributions    \n",
+    "for i in range(4):\n",
+    "    axs2[i].set_ylim([0,7])\n",
+    "    axs2[i].set_xlim([-1.2,1.2])\n",
+    "    axs2[i].spines['right'].set_visible(False)\n",
+    "    axs2[i].spines['top'].set_visible(False)\n",
+    "    axs2[i].spines['left'].set_visible(False)\n",
+    "    axs2[i].spines['bottom'].set_bounds(-1, 1)\n",
+    "    axs2[i].axes.get_yaxis().set_visible(False)\n",
+    "    if i > 0:\n",
+    "        axs2[i].spines['left'].set_visible(False)\n",
+    "    axs2[i].hist(raFit[i,:],30, density=True)\n",
+    "    axs2[i].vlines(np.min(fitRef[:,i]),0,5,'r')    \n",
+    "    axs2[0].set_xticks([-1,-0.5,0,0.5,1])\n",
+    "    axs2[0].set_xticklabels(['-1','-0.5','0','0.5','1'])\n",
+    "\n",
+    "axs2[0].legend(['min(top 1%)', 'random'], loc=2, frameon=False,fancybox=False, \n",
+    "               title=\"dpFC Fitenss\",labelspacing=0.05, fontsize=fontS, \n",
+    "               markerscale=1, numpoints=1, handlelength=1,borderaxespad=0,\n",
+    "              handletextpad=0.1)\n",
+    "\n",
+    "mimaFC = [-0.5, 1]\n",
+    "wiSim = np.arange(wiFC[-1]-wiFC[0])\n",
+    "for i in range(4):\n",
+    "    wiIx = wiSim + simOn[i] - wiFC[1] + i\n",
+    "    wiIx[wiIx<0] = 0\n",
+    "    for j in range(4):\n",
+    "        fcst = si2fc[j,wiIx].T\n",
+    "        if i == j:\n",
+    "            axs1[i].plot(fcst, c=colorStg[j+1])\n",
+    "        else:\n",
+    "            axs1[i].plot(fcst,'--', c=colorStg[j+1])\n",
+    "            \n",
+    "    axs1[i].axvline(wiFC[1]-3,  c='k')\n",
+    "    axs1[i].set_ylim(mimaFC)\n",
+    "    axs1[i].set_yticks([])   \n",
+    "    axs1[i].axvspan(0, wiFC[1], facecolor='silver', alpha=0.7)\n",
+    "    axs1[i].axvspan(wiFC[1]+durM[i+1],wiFC[-1]-wiFC[0] , facecolor='silver', alpha=0.7)\n",
+    "    axs1[i].set_xticks(np.arange(2,(wiFC[2]-wiFC[0]),15))\n",
+    "    axs1[i].set_xticklabels([str(i) for i in range(-150,(wiFC[2]-np.sum(wiFC[:2]))*10,150)])\n",
+    "    axs1[i].set_xlabel('Time [ms]')\n",
+    "    axs1[i].set_xlim([0,(wiFC[2]-wiFC[0])])\n",
+    "\n",
+    "axs1[0].set_ylabel('Fit to MEG \\n within-stage dpFC',labelpad=4)\n",
+    "\n",
+    "# colorbars\n",
+    "labelsC = ['Accuracy', 'Input',]\n",
+    "axc = [None,None]\n",
+    "cmaps = [None,None]\n",
+    "axc[0] = fig.add_axes([0.05,0.4, 0.005, 0.1])\n",
+    "axc[1] = fig.add_axes([0.05,0.6, 0.005, 0.1])\n",
+    "cmaps[0] = cm.ScalarMappable( cmap = pl.get_cmap('cool'))\n",
+    "cmaps[1] = cm.ScalarMappable( cmap = pl.get_cmap('autumn'))\n",
+    "for i in range(2):\n",
+    "#     cmaps[i].colorbar.set_lable('left')\n",
+    "    cmaps[i].set_array([])\n",
+    "    cm_ = fig.colorbar(cmaps[i], cax=axc[i], ticks=[0, 1])\n",
+    "    cm_.set_label(labelsC[i], labelpad=1,horizontalalignment='right')\n",
+    "    axc[i].yaxis.set_ticks_position('left')\n",
+    "    axc[i].yaxis.set_label_position('left')\n",
+    "    \n",
+    "axc[1].yaxis.set_ticklabels(['-800','550' ])  # horizontal colorbar\n",
+    "axc[0].yaxis.set_ticklabels(['0','1' ])  # horizontal colorbar\n",
+    "    \n",
+    "# idetifiers\n",
+    "yLoc = (np.cumsum(np.flipud(gridHratios))) * (1-bottOff-0.05)*0.8 + bottOff\n",
+    "yLoc = [0.2,0,0.5,0.8]\n",
+    "ids  = ['(C)','(B)','(A)']\n",
+    "for iX, i in enumerate([0,2,3]):\n",
+    "    fig.text(0.96, yLoc[i],ids[iX],ha='left',va='center',fontweight='bold')  \n",
+    "    \n",
+    "# fname  = '.dataSave/figure2mean.svg'  \n",
+    "# pl.savefig(fname, dpi=None, facecolor='w', edgecolor=None,\n",
+    "#         orientation='portrait', papertype=None, format='svg',\n",
+    "#         transparent=False, bbox_inches=None, pad_inches=0.,\n",
+    "#         frameon=None)\n",
+    "# pl.close('all')\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

+ 347 - 0
reproduce_SI_fig.ipynb

@@ -0,0 +1,347 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import scipy as sp\n",
+    "import scipy.stats as sps\n",
+    "from scipy.io import loadmat\n",
+    "import scipy.signal as sig\n",
+    "import matplotlib.pyplot as pl\n",
+    "from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n",
+    "%matplotlib tk\n",
+    "from mpl_toolkits.mplot3d import Axes3D\n",
+    "from matplotlib import cm\n",
+    "from matplotlib.patches import Circle\n",
+    "from matplotlib.gridspec import GridSpec\n",
+    "import copy\n",
+    "%matplotlib tk\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Resting state dynamics, SI fig 1"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pathFile = './data/rest/Globals_v5ms_grid_globalC_'\n",
+    "globalC = np.concatenate((np.array([0.01, 0.05, 0.1, 0.15,0.2,0.25]), np.arange(0.3,2.,0.05)))\n",
+    "\n",
+    "nL = 13\n",
+    "nG = globalC.shape[0]\n",
+    "nMo = 25\n",
+    "T = 60000\n",
+    "\n",
+    "mG = np.empty((13,globalC.shape[0]))\n",
+    "mL = np.empty((13,globalC.shape[0]))\n",
+    "mGt = np.empty((13,globalC.shape[0], T, nMo), dtype=np.float32)\n",
+    "mL[:] = np.nan\n",
+    "mG[:] = np.nan\n",
+    "\n",
+    "for iX, lC in enumerate(globalC):\n",
+    "    file = pathFile + str(np.round(lC,3)) + '_co.npz'\n",
+    "    with np.load(file) as data:\n",
+    "        mG[:,iX] = data['gMet'].ravel()\n",
+    "        mL[:,iX] = data['lMet'].ravel()\n",
+    "        mGt[:,iX,:,:] = np.squeeze(data['gMetT'])\n",
+    "        localC  = data['localC']\n",
+    "\n",
+    "mGc = np.mean(np.std(mGt,axis=2),axis=2)\n",
+    "\n",
+    "mL[np.isnan(mL)] = np.nanmin(mL)\n",
+    "mG[np.isnan(mG)] = np.nanmin(mG)\n",
+    "mGc[np.isnan(mGc)] = np.nanmin(mGc)"
+   ]
+  },
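+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As computed above, `mGc` takes the standard deviation over time of the stored global order time series (`gMetT`) and averages it over the 25 models, $\\mathrm{mGc} = \\langle \\sigma_t[R(t)] \\rangle_{\\mathrm{models}}$, consistent with the usual reading of metastability as the temporal variability of the order parameter."
+   ]
+  },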
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "autocorrelation of global metatability"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "acMet = np.zeros((nL, nG))\n",
+    "\n",
+    "for i, iXl in enumerate(range(localC.shape[0])):\n",
+    "    for iXg in range(globalC.shape[0]):\n",
+    "        acM = 0\n",
+    "        for iXm in range(nMo): # number of models\n",
+    "#             autocorrelation\n",
+    "            y = mGt[iXl, iXg, :, iXm] - np.mean(mGt[iXl, iXg, :, iXm])\n",
+    "            ac_ = sig.correlate(y, y, mode='full')\n",
+    "            ac_ /= ac_.max()\n",
+    "            acM += np.mean(ac_[:int(ac_.shape[0]/2)]**2)\n",
+    "        acMet[i, iXg] = acM / nMo"
+   ]
+  },
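+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Restating the metric above: per coupling pair and model, the global order time series $y(t)$ is demeaned, its full autocorrelation is normalized by the zero-lag value, $\\rho(\\tau) = \\sum_t y(t)\\,y(t+\\tau) \\,/\\, \\sum_t y(t)^2$, and `acMet` stores the mean of $\\rho(\\tau)^2$ over the first half of the lags, averaged over the `nMo` models. Larger values indicate slowly decaying, more self-similar global order dynamics."
+   ]
+  },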
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "SI fig 1"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "lC = 5\n",
+    "gC = 3\n",
+    "fontS=9\n",
+    "figlabels = ['Local Metasta.', 'Global Metasta.', 'Autocorrelation\\nGlobal Order']\n",
+    "im = [None]*3\n",
+    "fgrid = dict(hspace=0.1,top=0.9,bottom=0.2, left=0.1, right=0.9)\n",
+    "fig, axs = pl.subplots(3,1,gridspec_kw=fgrid)\n",
+    "\n",
+    "fig.set_figheight(7)\n",
+    "fig.set_figwidth(7)\n",
+    "fig.set_dpi(900)\n",
+    "\n",
+    "im[0] = axs[0].pcolor(mL[1:,1:], cmap=cm.magma, vmin=0, vmax=np.nanmax(mLm[1:,1:]))\n",
+    "im[1] = axs[1].pcolor(mGc[1:,1:], cmap=cm.magma, vmin=0, vmax=np.nanmax(mGcm[1:,1:]))\n",
+    "im[2] = axs[2].pcolor(acMet[1:,1:], cmap=cm.magma_r,  vmin=0, vmax=np.nanmax(acMet[1:,1:]))\n",
+    "\n",
+    "axs[0].set_title('Resting state coordination dynamics')\n",
+    "for i in range(3):\n",
+    "    axc = pl.colorbar(im[i],ax=axs[i],ticks=[0,np.nanmax(im[i].get_array())])\n",
+    "    axc.set_label(figlabels[i])\n",
+    "    axc.ax.yaxis.tick_left()\n",
+    "    axc.ax.set_yticklabels(['0',\n",
+    "                            str(np.round(np.nanmax(im[i].get_array()),4))],rotation=90)\n",
+    "    axc.ax.get_ymajorticklabels()[0].set_verticalalignment('bottom')  \n",
+    "    axc.ax.get_ymajorticklabels()[1].set_verticalalignment('top') \n",
+    "    axs[i].yaxis.set_ticks_position('both')\n",
+    "    axs[i].set_yticks(np.arange(localC.shape[0]-1,None,2)+0.5)\n",
+    "    axs[i].set_yticklabels(np.round(localC[1::2],2))\n",
+    "    axs[i].set_aspect('equal')\n",
+    "    axs[i].set_ylabel('Local\\nCoupling',fontsize=fontS)\n",
+    "    circ = Circle((lC+0.5,gC+0.5), radius=0.5, facecolor='None', edgecolor='g', lw=2)\n",
+    "    axs[i].add_patch(circ)\n",
+    "    axs[i].set_xticklabels([''])\n",
+    "    axs[i].set_xticks(np.arange(globalC.shape[0]-1,None,2)+0.5)\n",
+    "    axs[i].xaxis.set_ticks_position('both')\n",
+    "axs[2].set_xticklabels(np.round(globalC[1::2],2),rotation='vertical')\n",
+    "    \n",
+    "axs[-1].set_xlabel('Global Coupling',fontsize=fontS)\n",
+    "axs[-1].patch.set(hatch='xx', edgecolor='k')\n",
+    "axs[-1].patch.set(color='k', fill=True,hatch='xx',edgecolor='w')\n",
+    "\n",
+    "# fname  = './dataSave/FigureSI_Rest.png'  \n",
+    "# pl.savefig(fname, dpi=None, facecolor='w', edgecolor=None,\n",
+    "#         orientation='portrait', papertype=None, format='png',\n",
+    "#         transparent=False, bbox_inches=None, pad_inches=0.,\n",
+    "#         frameon=None)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Evolution of optimizers, SI Fig 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "with np.load('./data/evolutionIter.npz') as data:\n",
+    "    scor = data['sCor']\n",
+    "    fit  = data['fit']\n",
+    "    ener = data['ener']\n",
+    "    puls = data['puls']"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "stg = 3 # Stage to be plotted\n",
+    "isl = 0 # I sland to be plotted\n",
+    "\n",
+    "fitMask= fit[isl][stg] > 0\n",
+    "fit0   = fit[isl][stg][fitMask]\n",
+    "scor0  = scor[isl][stg][fitMask]\n",
+    "ener0  = ener[isl][stg][fitMask]\n",
+    "puls0  = puls[isl][stg][fitMask,:]\n",
+    "iXsort = np.argsort(fit0)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "SI Fig 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ylabels = ['Fit\\nIndex', 'Spear.\\n Corr.','Total \\n input' , 'Local \\n Input','Local \\n Input Sorted']\n",
+    "gridHratios = [0.13,0.13,0.13,0.3,0.3]\n",
+    "fgrid = dict(height_ratios=gridHratios,\n",
+    "             hspace=0.05,right=0.88)\n",
+    "fig, axs = pl.subplots(5, 1, gridspec_kw= fgrid)\n",
+    "axs[0].set_title('Evolution: Island 1, Stage 4')\n",
+    "axs[0].plot(fit0,'.', markersize=0.1)\n",
+    "axs[0].plot(fit0[iXsort],'.', markersize=0.1)\n",
+    "# axs[0].legend(['iterations', 'sorted'])\n",
+    "axs[1].plot(scor0,'.', markersize=0.1)\n",
+    "axs[1].plot(scor0[iXsort],'.', markersize=0.05)\n",
+    "axs[2].plot(ener0,'.', markersize=0.1)\n",
+    "axs[2].plot(ener0[iXsort],'.', markersize=0.05)\n",
+    "axs[2].set_ylim([0,60000])\n",
+    "axs[2].set_yticks([0,30000, 60000])\n",
+    "axs[3].matshow(puls0.T,aspect='auto',cmap='bwr',vmin=-1500,vmax=1500)\n",
+    "im = axs[4].matshow(puls0[iXsort,:].T,aspect='auto',cmap='bwr',vmin=-1500,vmax=1500)\n",
+    "axs[4].xaxis.tick_bottom()\n",
+    "axs[4].set_xlabel('iterations')\n",
+    "\n",
+    "\n",
+    "\n",
+    "axc = fig.add_axes((.89,0.14,0.01,0.4))\n",
+    "pl.colorbar(im, cax=axc)\n",
+    "\n",
+    "yLoc = (np.cumsum(np.flipud(gridHratios))) *0.8 +0.06# * (1-bottOff) + bottOff\n",
+    "ids  = ['(E)','(D)','(C)','(B)','(A)']\n",
+    "for i in range(5):\n",
+    "    axs[i].set_ylabel(ylabels[i])\n",
+    "    axs[i].yaxis.tick_right()\n",
+    "    axs[i].set_xlim([0,fit0.shape[0]])\n",
+    "    fig.text(0.07, yLoc[i],ids[i],ha='left',va='center')\n",
+    "    if i < 2:\n",
+    "        axs[i].set_ylim([0,1])\n",
+    "        axs[i].yaxis.get_major_ticks()[1].label2.set_verticalalignment('top')  \n",
+    "        axs[i].yaxis.get_major_ticks()[0].label2.set_verticalalignment('bottom')  \n",
+    "    if i == 2:\n",
+    "        axs[i].yaxis.get_major_ticks()[-1].label2.set_verticalalignment('top')  \n",
+    "        axs[i].yaxis.get_major_ticks()[0].label2.set_verticalalignment('bottom')  \n",
+    "    if i < 4:\n",
+    "        axs[i].set_xticks([])\n",
+    "    if i > 2:\n",
+    "        axs[i].set_yticks([])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Return to metastability, SI Fig 3"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "file = './data/metaFC1000afterPulse_G0p3L0p7.npz'\n",
+    "\n",
+    "with np.load(file) as data:\n",
+    "    siMglo = data['metaG']\n",
+    "    siMloc = data['metaL']\n",
+    "    si2fc  = data['sim2eFC']\n",
+    "\n",
+    "\n",
+    "fs = 100\n",
+    "t  = np.linspace(1/fs,siMglo.shape[1]/fs,siMglo.shape[1])\n",
+    "tR = np.arange(1/fs,1.5+1/fs,1/fs) # trial length\n",
+    "nR = tR.shape[0]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "SI Fig 3"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ylabels = ['Global Meta.','Local Meta.','fit to FC']\n",
+    "fgrid = dict(height_ratios=[0.33,0.33,0.33],\n",
+    "             hspace=0.1)\n",
+    "fig, axs = pl.subplots(3, 1, gridspec_kw= fgrid)\n",
+    "\n",
+    "\n",
+    "axs[0].plot(t, siMglo[-1,:])\n",
+    "p1 = axs[1].plot(t, siMloc[-1,:,:,0].T, linewidth=0.5,alpha=0.7)\n",
+    "axs[2].plot(t, si2fc.T)\n",
+    "for i in range(3):\n",
+    "    axs[i].set_ylabel(ylabels[i])\n",
+    "    if i < 2:\n",
+    "        axs[i].set_xticks([])\n",
+    "        axs[i].set_ylim([0,1])\n",
+    "    else:\n",
+    "        axs[i].set_xlabel('Time [seconds]')\n",
+    "        axs[i].set_ylim([-0.5,1])\n",
+    "        axs[i].legend(loc=1,labels=['stage 1','stage 2','stage 3','stage 4'],\n",
+    "           labelspacing=0.1,framealpha=0.6,edgecolor='w',handletextpad=0.1)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

+ 101 - 0
runOptimization.py

@@ -0,0 +1,101 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Feb 23 10:12:45 2021
+
+@author: Oscar Portoles
+"""
+
+import pygmo as po
+import numpy as np
+# import optimProbs as udp
+import optimizationProcesses as udp
+import sys
+import time
+import copy
+
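+# Overview: for each HSMM stage this script (1) builds a pygmo archipelago with ten
+# DE-variant islands on the Opt_allROIpulse_Wenecor problem, (2) evolves it with
+# ring-topology migration, (3) collects the per-island logs, (4) picks the best
+# energy/correlation trade-off solution (FindAndKeepBestSolution) and re-runs it,
+# and (5) passes its final state (zHist) on as the initial condition of the next stage.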
+def main():
+    velocity    = sys.argv[1]   # velocity [m/s] given as an input parameter in the terminal
+    nStage      = 4
+    nTask       = 100           # nTask x nGenera = number of optimization iterations
+    nGenera     = 20
+    sizePop     = 50  		# number of individuals on each island 
+    fitPercentil= 99.5
+    probMigra   = 0.1           # probability of a migration happening in each task (of the nTask evolve calls)
+    zHist       = None
+    zSim        = []
+    simLog      = []
+    edgePulse   = []
+        
+    pathsave    = './dataSave/'
+    namefile    = 'stgSequence_Lopt_allPu1000_wE_P040_optIni_v' + str(velocity) + '_fixG0p15L0p6_Stg' 
+     
+    # algorithms: 
+    algos = [po.algorithm(po.de1220(gen=nGenera,allowed_variants=[1,3,6,8],variant_adptv=1)), # jDE (Brest et al.) [rand-to-best/1/exp, rand-to-best/1/bin, rand-to-current/2/bin, rand-to-current/2/exp]                 
+              po.algorithm(po.de1220(gen=nGenera,allowed_variants=[15,16],variant_adptv=1)), # jDE (Brest et al.) rand-to-current/2/bin
+                po.algorithm(po.de1220(gen=nGenera,allowed_variants=[15,16],variant_adptv=2)), # iDE (Elsayed  et al.) rand-to-current/2/bin
+                po.algorithm(po.de1220(gen=nGenera,allowed_variants=[1,3,6,8],variant_adptv=2)), # iDE (Elsayed et al.) best/1/bin, rand/1/bin, rand-to-current/2/bin, 
+                po.algorithm(po.de1220(gen=nGenera,allowed_variants=[2,7],variant_adptv=1)),
+                po.algorithm(po.de1220(gen=nGenera,allowed_variants=[15,16],variant_adptv=1)), # jDE (Brest et al.) rand-to-current/2/bin
+                po.algorithm(po.de1220(gen=nGenera,allowed_variants=[15,16],variant_adptv=2)), # iDE (Elsayed  et al.) rand-to-current/2/bin
+                po.algorithm(po.de1220(gen=nGenera,allowed_variants=[1,3,6,8],variant_adptv=1)),
+                po.algorithm(po.de1220(gen=nGenera,allowed_variants=[1,3,6,8],variant_adptv=2)), 
+                po.algorithm(po.de1220(gen=nGenera,allowed_variants=[2,7],variant_adptv=1)),             
+                ]
+    nIsla = len(algos)
+    # My island
+    myUDI = po.mp_island(use_pool=True)
+    myUDI.resize_pool(nIsla)  
+    # Put the right file name to read the bump topologies!
+    for iXstage in range(nStage):
+        # problem
+        prob   = po.problem(udp.Opt_allROIpulse_Wenecor(stageIx=iXstage,vel=velocity,zh=zHist))  
+        # empty archi
+        archi = po.archipelago()
+        # fill up the archipelago with islands
+        for algo_ in algos:
+            archi.push_back(po.island(algo=algo_, prob=prob, size=sizePop, udi=myUDI))
+        print('All islands set for Stage ', iXstage+1)
+        start  = time.time()
+        # set migration topology
+        archi.set_topology( po.topology(po.ring(n=nIsla, w=probMigra)) )
+        # Evolve (optimize)
+        archi.evolve(n=nTask)
+        print(archi.status)
+        # wait for archi to end
+        archi.wait_check()
+        print('Stage: ', iXstage+1, ', best fitness: ',[np.round(fIsla[0],3) for fIsla in archi.get_champions_f()] , 
+              ', Duration: ', np.round( (time.time() - start)/3600, 4))
+        # logs from UDP and migration
+        logsUDP  = []
+        for isla_ in archi:
+            logsUDP.append( isla_.get_population().problem.extract(type(udp.Opt_allROIpulse_Wenecor())).get_mylogs() )    
+
+        log_mig = archi.get_migration_log()
+        logsUDP = np.asarray(logsUDP)
+        # Find and re-run the best solution to pass its end state as initial conditions for the next stage
+        udpH = udp.FindAndKeepBestSolution(stageIx=iXstage,vel=velocity,zh=zHist)
+        udpH.getBestWeightedModel(logsUDP, fitPercentil)
+        zHist = udpH.runBestModel()
+        zi = np.imag(zHist)
+             
+        # keep simulated data
+        zSim.append( udpH.z[:,udpH.maxDlay+1:,:] )
+        simLog.append( udpH.log)
+        edgePulse.append( udpH.edgePulse )
+        # save results of current stage
+        outfile  = pathsave + namefile + str(iXstage+1) + '.npz'
+        np.savez(outfile,logsUDP=logsUDP, nGen=nGenera, sizeP=sizePop, pMig=probMigra,
+                 log_mig=log_mig,nIsla=nIsla, allow_pickle=True)
+        print('Done with ', namefile)
+
+    zSim    = np.hstack(zSim)
+    simLog  = np.asarray(simLog)
+    outfile  = pathsave + namefile + 'trialZ.npz'
+    np.savez(outfile,sim_z=zSim, sim_log=simLog,edgePulse=edgePulse)
+    return zSim
+
+if __name__ == "__main__":
+    __spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"
+    z = main()
+   

+ 61 - 0
thresholdPLIbysurro.m

@@ -0,0 +1,61 @@
+% Threshold PLI above surrogate level and a minimal change; FC edges emerge, dissolve or remain
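+%
+% In short (summary of the steps below): an edge is kept when |PLI| is at or above the
+% 95th percentile of its surrogate distribution (p95a); edges that emerge or dissolve
+% between consecutive stages must additionally change by at least minDif, the 5th
+% percentile of the observed emerge/dissolve changes, otherwise the emerging/dissolving
+% edge is set to 0 in the corresponding stage.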
+
+
+clear all
+% close all
+clc
+
+
+surroName   = '/surroCirc200.mat';
+
+fcPath      = ['./data'];
+dataPath    = './data/thetaROIs_R.mat';
+fcName      = '/FCraw_R.mat';
+hands = 1;
+
+load([fcPath surroName], 'p95a')
+load([fcPath fcName])
+% clear fcPath dataPath surroName fcName
+nStage = size(pliR, 1);
+
+% p95a = prctile(abs(pliSurro),95,4); % minimum FC with respect to surrogates
+% binarize network above threshold
+
+fc01 = logical(abs(pliR) >= squeeze(p95a(:,:)));
+fcTh = pliR;
+fcTh(~fc01) = 0;
+
+% differences between stages
+nDisolv = zeros(nStage-1,1);
+nEmerg  = zeros(nStage-1,1);
+nStill  = zeros(nStage-1,1);
+nTotal  = zeros(nStage,1);
+nFlip   = zeros(nStage-1,1);
+
+difTest = [];
+for i = 1:nStage-1
+    % dissolve FC
+    condition   = logical(fc01(i,:) == 1 & fc01(i+1,:)  == 0 );
+    difTest     = horzcat(difTest, abs( pliR( i, condition ) - pliR( i+1, condition ) ));
+    % emerge FC
+    condition   = logical( fc01(i,:) == 0 & fc01(i+1,:)  == 1 );
+    difTest     = horzcat(difTest, abs( pliR( i, condition ) - pliR( i+1, condition ) ));
+end
+
+minDif = prctile(difTest,5); % minimal difference in change of FC between consecutive stages
+
+% remove FCs that emerge or dissolve between stages but whose change in value is small.
+for i = 1:nStage-1
+    % dissolves FC
+    belowMinDiff = logical(fc01(i,:) == 1 & fc01(i+1,:) == 0 & abs(pliR(i,:)-pliR(i+1,:)) < minDif);
+    fc01(i,belowMinDiff) = 0;
+    fcTh(i,belowMinDiff) = 0;
+    % emerges FC
+    belowMinDiff = logical( fc01(i,:) == 0 & fc01(i+1,:)  == 1 & abs(pliR(i,:)-pliR(i+1,:)) < minDif );
+    fc01(i+1,belowMinDiff) = 0;
+    fcTh(i+1,belowMinDiff) = 0;
+end
+
+
+%save('dataSave/FCstages.mat', 'fcTh','fc01')
+

+ 20 - 0
vis1ERPFCwindows.m

@@ -0,0 +1,20 @@
+function vis1ERPFCwindows(data,fc,onPnt,satur)
+% compute ERP from data locked to onset, no conditions or subjects
+% @data: [samples,fc, stages]
+
+nStg = size(data,3);
+
+figure('Position',[10,500,1600,400])
+for st = 1: nStg
+    subplot(1,nStg, st)
+    data_ = squeeze( mean(abs(data(:,fc(st,:),st)),2));
+   
+    plot(data_)
+    hold on
+    xline(onPnt,'r','LineWidth',2);
+    xline(onPnt-2.5,'--k','LineWidth',1.5);
+    xline(onPnt+2.5,'--k','LineWidth',1.5);
+    xline(onPnt-7.5,'--k','LineWidth',1.5);
+    xlabel('t*10 [ms]')
+
+end

+ 20 - 0
visERPwindows.m

@@ -0,0 +1,20 @@
+function visERPwindows(data,onPnt,satur)
+% compute ERP from data locked to onset, no conditions or subjects
+% @data: [samples, rois, trials]
+
+nStg = size(data,3);
+
+figure('Position',[10,500,1600,400])
+range = [min(data,[],'all'), max(data,[],'all')];
+for st = 1: nStg
+    subplot(1,nStg, st)
+    data_ = squeeze(data(:,:,st));
+    imagesc(data_', range)
+    hold on
+    xline(onPnt,'r','LineWidth',2);
+    xline(onPnt-2.5,'--k','LineWidth',1.5);
+    xline(onPnt+2.5,'--k','LineWidth',1.5);
+    xline(onPnt-7.5,'--k','LineWidth',1.5);
+    xlabel('t*10 [ms]')
+    colorbar
+end

+ 90 - 0
withinStagePLI.m

@@ -0,0 +1,90 @@
+% get bump locations and define temporal windows for connectivity
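+%
+% Per trial, the (signed) within-stage PLI of an ROI pair is the time average of
+% sign(imag(csd)) over the stage window: values near +/-1 indicate a consistent
+% phase lead/lag between the two regions, values near 0 no consistent phase
+% relation. Trial PLIs are then averaged within and across subjects.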
+
+clear all
+close all
+clc
+
+buOff   = 2; % FC stages: everything between bumps except for the bumps
+blOff   = 40; % 400 milliseconds baseline
+withBL  = true;
+dimBL   = [10, 6]; % from 40 - 10 to onset - 6 samples -> BL with 25 samples, 250 milliseconds
+dataHSMM = '/hmmBu4RroiAveMap3StagBL.mat';
+
+
+nSj     = 18;
+fs      = 100;
+nRoi    = 68;
+nFC     = (nRoi^2)/2 - nRoi/2;
+
+dataPath = './data';
+dataMEG  = './data/thetaROIs_R.mat';
+saveFile = './dataSave' ;
+folderModel = dataHSMM(1:end-4);
+
+load([dataPath dataHSMM], 'proBump','x','y')
+load(dataMEG, 'roiaveR','xR', 'yR', 'subjtR')
+
+nStage  = size(proBump, 3)+1;
+nStageBL= nStage;
+buOff_i = buOff * ones(1, nStage-1);
+buOff_i = [0, buOff_i];
+buOff_e = buOff * ones(1, nStage-1);
+buOff_e = [buOff_e, 0];
+if withBL, 
+    nStageBL = nStage + 1; 
+    buOff_i  = [dimBL(1), buOff_i];
+    buOff_e  = [dimBL(2), buOff_e];
+end
+
+roiR = single(roiaveR);
+xd = xR;
+yd = yR;
+subj = subjtR;
+clear roiaveR xR yR subjtR 
+
+pairs =  combnk([1:nRoi],2);
+tooShort = [];
+
+nTr     = length(x);
+lens    = y - x + 1;
+pliTr   = zeros(nStageBL, nFC, nTr, 'single');
+maxN    = max(lens);
+buSampl = ones(nTr, nStage,'single');
+% location of bumps given by HsMM
+for bu = 1:nStage-1
+    buSampl(:,bu+1)   = round( [1:maxN] * squeeze(proBump(:,:,bu)) );
+end
+buSampl = horzcat(buSampl, lens);
+buSampl = buSampl + blOff;
+if withBL,
+    buSampl = horzcat(ones(size(buSampl,1), 1), buSampl);
+end
+for tr = 1:nTr
+    yTr = y(tr) - x(tr) + 1;
+    data = hilbert(roiR(xd(tr):yd(tr),:));
+    % common spectral density
+    csd = data(:, pairs(:,1)) .* conj(data(:, pairs(:,2)));
+    % sample phase differences sign
+    siCsd = sign( imag( csd ));
+     % stage mean PLI
+    for s = 1:size(buSampl,2)-1  
+        if (buSampl(tr,s+1)-buOff_e(s)) - (buSampl(tr,s)+buOff_i(s)) <= 0,
+            pliTr(s,:,tr) = 0;
+            tooShort = vertcat(tooShort, [s,tr]);
+        end
+        pliTr(s,:,tr) = mean( siCsd( buSampl(tr,s)+buOff_i(s):buSampl(tr,s+1)-buOff_e(s) ,:), 1);
+    end
+end
+% average by subjects
+pliSj   = zeros(nStageBL, nFC, nSj);
+for sj = 1:nSj
+    pliSj(:,:,sj) =  squeeze( mean(pliTr(:,:,subj==sj), 3) );  % mean over the trials of subject sj
+end
+pliR = squeeze( mean(pliSj,3) );
+ 
+
+save2path = [saveFile folderModel];
+%save([save2path '/FCraw.mat'], 'pliR')
+
+
+

+ 100 - 0
withinStagePLIsurrogate.m

@@ -0,0 +1,100 @@
+% do within-stage PLI surrogates after getting bump locations and defining temporal windows for connectivity
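+%
+% Surrogates are built per trial by circularly shifting one signal of each ROI pair
+% by a random lag (circshift), which preserves the spectrum of each signal but breaks
+% the phase relation between regions. The 95th/98th percentiles of |PLI| over the
+% nSurro surrogates (p95a, p98a) are used as significance thresholds downstream.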
+
+clear all
+close all
+clc
+
+nSurro  = 200;
+buOff   = 2; % FC stages: everything between bumps except for the bumps
+withBL  = true;
+dimBL   = [10, 6]; % from 40 - 10 to onset - 6 samples -> BL with 25 samples, 250 milliseconds
+dataHSMM = '/hmmBu4RroiAveMap3StagBL.mat';
+savename = '/surroCirchmmBu4RroiAveMap3StagBL.mat';
+
+
+blOff   = 40; % 400 milliseconds baseline
+nSj     = 18;
+fs      = 100;
+nRoi    = 68;
+nFC     = (nRoi^2)/2 - nRoi/2;
+
+savePath = './dataSave';
+dataPath = './data';
+dataMEG  = './data/thetaROIs_R.mat';
+load([dataPath dataHSMM], 'proBump', 'x','y')
+load(dataMEG, 'roiaveR','xR', 'yR', 'subjtR')
+
+nStage  = size(proBump, 3)+1;
+nStageBL= nStage;
+buOff_i = buOff * ones(1, nStage-1);
+buOff_i = [0, buOff_i];
+buOff_e = buOff * ones(1, nStage-1);
+buOff_e = [buOff_e, 0];
+if withBL, 
+    nStageBL = nStage + 1; 
+    buOff_i  = [dimBL(1), buOff_i];
+    buOff_e  = [dimBL(2), buOff_e];
+end
+
+roiR = single(roiaveR);
+xd   = xR;
+yd  = yR;
+subj = subjtR;
+
+clear roiaveR xR yR subjtR 
+
+pairs =  combnk([1:nRoi],2);
+tooShort = [];
+pliSurro   = zeros(nStageBL, nFC, nSurro, 'single');
+
+for sj = 1:nSj
+    xj  = x(subj==sj); % bumps
+    yj  = y(subj==sj);
+    xdj = xd(subj==sj); % data 
+    ydj = yd(subj==sj);
+    nTr     = length(xj);
+    lens    = yj - xj + 1;
+    pliTr   = zeros(nStageBL, nFC, nSurro, nTr,'single');
+    maxN    = max(lens);
+    buSampl = ones(nTr, nStage,'single');
+    % location of bumps given by HsMM
+    for bu = 1:nStage-1
+        buSampl(:,bu+1)   = round( [1:size(proBump,1)] * squeeze(proBump(:, subj==sj, bu)));
+    end
+    buSampl = horzcat(buSampl, lens);
+    buSampl = buSampl + blOff;
+    if withBL,
+        buSampl = horzcat(ones(size(buSampl,1), 1), buSampl);
+    end
+    for tr = 1:nTr
+        % PLI: cross-spectral power
+        data    = hilbert(roiR(xdj(tr):ydj(tr),:));
+        data    = repmat(data,1,1,nSurro);
+        dataSu  = zeros(size(data));
+        raIx    = uint32(randi(lens(tr)-1,1,nSurro));
+        for si = 1:nSurro
+            dataSu(:,:,si) = circshift(data(:,:,1), raIx(si));
+        end
+        % cross-spectral density
+        csd = data(:, pairs(:,1),:) .* conj(dataSu(:, pairs(:,2),:));
+        % sample phase differences sign
+        siCsd = sign( imag( csd )); % [samples, FC, surros]
+         % stage mean PLI
+        for s = 1:size(buSampl,2)-1  
+            if (buSampl(tr,s+1)-buOff_e(s)) - (buSampl(tr,s)+buOff_i(s)) <= 0,
+                pliTr(s,:,:,tr) = 0; % [stages, FC, surro, trials]
+                tooShort = vertcat(tooShort, [s,tr]);
+            end    
+            pliTr(s,:,:,tr) = mean( siCsd( buSampl(tr,s)+buOff_i(s):buSampl(tr,s+1)-buOff_e(s) ,:,:), 1);
+        end
+    end
+    if any(any(any(any(isnan(pliTr)))))
+         aNaN = 1;
+    end
+    pliSurro(:,:,:) =  squeeze(pliSurro(:,:,:)) + squeeze( mean(pliTr, 4) ) ./ nSj;  % mean over subjects
+end
+p95a = prctile(abs(pliSurro),95,3); % minimum FC with respect to surrogates
+p98a = prctile(abs(pliSurro),98,3); % minimum FC with respect to surrogates
+
+%save([savePath savename], 'pliSurro','p95a', 'p98a', 'tooShort')
+