'''
description: test spike rates from cereconn
author: Ioannis Vlachos
date: 16.11.18
based on Jonas Zimmermann's test scripts
Copyright (c) 2018 Ioannis Vlachos.
All rights reserved.'''
import sys
import time

import cere_conn as cc
import numpy as np
import pylab as plt

import aux

params = aux.load_config()
# get CereConn object
ck = cc.CereConn(withSRE=True, withSBPE=False)
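# withSRE enables the spike-rate estimator used below; withSBPE (left disabled here)
# presumably toggles a separate spike-band-power estimator that this test does not need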
ck.send_open()

# Wait until connection is established
t = time.time()
while ck.get_state() != cc.ccS_Idle:
    time.sleep(0.005)
print("It took {:5.3f}s to open CereConn\n".format(time.time() - t))
# start recording
ck.send_record()
t = time.time()
while ck.get_state() != cc.ccS_Recording:
    time.sleep(0.05)
print("It took {:5.3f}s to start CereConn recording\n".format(time.time() - t))

# at least with NPlayServer, we need to wait for things to settle
time.sleep(.5)
# ck.set_spike_rate_estimator_ch_u_list([(0, 1), (1, 1), (0, 0), (1, 0)])
# ck.set_spike_rate_estimator_ch_u_list([(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0)])
n_channels = 128
ck.fill_spike_rate_estimator_ch_u_list(n_channels, 1)  # record channels, pool all units per channel
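# query back which (channel, unit) pairs were actually registered with the estimator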
ch_map = ck.get_spike_rate_estimator_ch_u_map()
print("Channel map: {}".format(ch_map))

bin_width = params.daq.spike_rates.bin_width
run_time = int(sys.argv[1])  # total run time in seconds, passed as the first CLI argument
# run_time = run_time #+ bin_width / 1000 / 2. #sec

ck.set_spike_rate_estimator_loop_interval_ms(params.daq.spike_rates.loop_interval)
# ck.set_spike_rate_estimation_method_exponential(params.daq.spike_rates.decay_factor)
ck.set_spike_rate_estimation_method_boxcar(params.daq.spike_rates.max_bins)
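# boxcar estimation averages the rate over the last `max_bins` bins; the exponential
# alternative above would instead weight recent bins according to `decay_factor`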
time.sleep(.5)
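# get_spike_rate_data() returns a dict holding the new rate bins ('rates', shaped
# bins x channels) and the NSP timestamp of the read ('ts'), as used below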
cd = ck.get_spike_rate_data()
print(cd['rates'].shape)
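# accumulate the incoming rate bins row-wise, one column per channel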
data = np.empty((0, n_channels))

# keep stats...
lasttime = time.time()
starttime = lasttime
times = []
samples = {}
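# NSP timestamps are in samples; the commented print inside the loop converts them
# to seconds assuming the usual 30 kHz sample clock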
start_ts = cd['ts']
last_ts = start_ts

print('STARTING LOOP\n')
ii = 0
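# poll the rate estimator roughly every 0.5 s and append the newly available bins
# until run_time has elapsed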
while time.time() - starttime <= run_time:
    now_time = time.time()
    print(f'Time left: {run_time - (now_time - starttime):.1f}s.')

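    # sleep at most 0.5 s, but never longer than the remaining run time (clamped at
    # zero so a slight overshoot cannot trigger a negative-sleep ValueError)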
    time.sleep(max(0.0, min(0.5, run_time - (now_time - starttime))))

    cd = ck.get_spike_rate_data()
    print(f"Loop {ii}, rates shape: {cd['rates'].shape}")
    data = np.concatenate((data, cd['rates']))
    print(f'Total data shape: {data.shape}.')
    # print(np.sum(data, 1))

    # print("\nNSP Time since start {}s. Time since last loop {}s".format((cd['ts'] - start_ts) / 30000.0, (cd['ts'] - last_ts) / 30000.0))
    last_ts = cd['ts']
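    # keep wall-clock intervals between polls for simple loop-timing statistics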
    ctime = time.time()
    times.append(ctime - lasttime)
    lasttime = ctime

    # print("System time since last loop: {}s".format(times[-1]))
    # print(f'time elapsed: {time.time() - starttime}')
    ii += 1

# print(data)
# print(np.any(data))
print('\n')
print(f'Total data shape: {data.shape}')
print('\n')
print('\n')
ck.send_close()

# compute percentiles to select a channel for audio feedback
pp = np.percentile(data, [5, 95], axis=0)
idx = np.argsort(pp[1])
print(idx)
print(pp[0:2, idx])
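# Example (an assumption, not part of the original workflow): the channel with the
# highest 95th-percentile rate would be a natural candidate for audio feedback:
# feedback_channel = idx[-1]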
plt.figure(1, figsize=(18, 5))
plt.clf()
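# overlay the 95th percentile (full-height bars) and the 5th percentile (offset by
# 0.01, presumably so channels with a zero low-end rate remain visible)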
plt.bar(range(n_channels), pp[1])
plt.bar(range(n_channels), pp[0] + 0.01)
# plt.ylim(bottom=-1)
plt.title('Firing rate percentiles')
plt.xlabel('Channel #')
plt.ylabel('sp/sec')
# plt.plot(pp.T, '-o')
plt.show()