
feedback.py 6.0 KB

'''
description: class for neurofeedback
author: Ioannis Vlachos
date: 08.01.2019
Copyright (c) 2019 Ioannis Vlachos.
All rights reserved.'''

import os
import random

import aux
from aux import log
# from paradigm import Paradigm
import pyaudio
from scipy.io import wavfile
from scipy import signal
import numpy as np
import munch

def norm2freq(norm_rate, alpha=360, beta=80):
    # Map a normalised rate (0..1) to a tone frequency in Hz: freq = norm_rate * alpha + beta.
    return norm_rate * alpha + beta


def note(freq, t_len, amp=1, rate=44100, mode='sin'):
    # Synthesise t_len seconds of a sine or sawtooth tone at the given frequency,
    # with a linear fade-in and fade-out over the first and last fifth of the samples.
    n_sample = int(t_len * rate)
    factor = float(freq) * np.pi * 2 / rate
    if mode == 'saw':
        data = signal.sawtooth(np.arange(n_sample) * factor, .25) * amp
    else:
        data = np.sin(np.arange(n_sample) * factor) * amp

    fade = int(n_sample / 5)
    fade_in = np.arange(0., 1., 1 / fade)
    fade_out = np.arange(1., 0., -1 / fade)
    data[:fade] = np.multiply(data[:fade], fade_in)
    data[-fade:] = np.multiply(data[-fade:], fade_out)

    return data.astype(np.float32)

def load_wav(wav_file, channels=2, rate=44100):
    # Load a wav file, scale int16 samples to float32 in [-1, 1], adjust the
    # channel count, and resample if the file's rate differs from `rate`.
    # Note: assumes wavfile.read() returns a 2-D (n_samples, n_channels) array.
    r, wf = wavfile.read(wav_file)
    if wf.dtype == 'int16':
        wf = np.float32(wf) / pow(2, 15)
    if wf.shape[1] > channels:
        wf = wf[:, :channels]
    elif wf.shape[1] < channels:
        if wf.shape[1] == 1:
            wf = np.repeat(wf, channels, axis=1)
    if r != rate:
        n_s = int(wf.shape[0] * rate / r)
        wf = signal.resample(wf, n_s)
    return wf


RATE = 44100
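

def _example_tone_synthesis():
    '''Illustrative sketch (not part of the original file): shows how the helpers
    above fit together.  A normalised rate of 0.5 maps to 0.5 * 360 + 80 = 260 Hz;
    note() then renders a faded 260 Hz tone, which is duplicated into two
    interleaved channels, ready to be written to a PyAudio float32 output stream.'''
    freq = norm2freq(0.5)                       # 260.0 Hz
    tone = note(freq, 0.5, amp=.25, rate=RATE)  # 0.5 s faded sine tone
    stereo = np.repeat(tone.reshape((-1, 1)), 2, axis=1).flatten()
    return stereo.tobytes()                     # byte buffer suitable for stream.write()
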

# class Feedback(Paradigm):
class Feedback():
    '''
    Paradigm for neurofeedback based on action potentials and/or LFP
    '''

    def __init__(self, audio_fb_target, params=None, **kwargs):
        '''
        Initialize the paradigm.'''
        self.audio_fb_target = audio_fb_target
        self.params = params
        config = self.params.paradigms.feedback
        log.info(config)

        self.audio_path = config.audio_path
        self.num_stim = config.number_of_stim
        self.mode = config.mode[config.selected_mode]
        self.states = config.states

        self.list_of_stimuli = self._create_list()
        # try:
        self.current_stimulus = self.list_of_stimuli.pop(0)
        self.history = []

        pa = pyaudio.PyAudio()
        self.stream = pa.open(output=True, channels=2, rate=RATE, format=pyaudio.paFloat32)
        # tobytes() replaces the deprecated ndarray.tostring() of the original code.
        self.sounds = {'success': load_wav(config.sounds.success).tobytes(),
                       'fail': load_wav(config.sounds.fail).tobytes()}
    def _create_list(self):
        '''
        Create a list of randomly ordered up/down stimuli, optionally preceding
        each stimulus with a baseline state.
        '''
        state_names = [k for k in self.states.keys() if k != 'baseline'] * self.num_stim
        random.seed()
        random.shuffle(state_names)
        if 'baseline' in self.states.keys():
            # Interleave a baseline period before every stimulus.
            state_names = [k for state in state_names for k in ('baseline', state)]
        return state_names
    def present_stimulus(self, audio=True):
        '''
        Play the current stimulus auditorily using the play function defined in
        the abstract class.

        Return:
            current_stimulus: string (the stimulus that is presented)
        '''
        log.info("Presenting stimulus")
        if audio:
            alpha = self.params.feedback.alpha
            beta = self.params.feedback.beta
            length = self.params.feedback.target_tone_length
            # Copy the current state's target values into the shared feedback target.
            for i in range(len(self.audio_fb_target)):
                self.audio_fb_target[i] = self.states[self.current_stimulus][i]
            # Convert the normalised target into a frequency and play it on both channels.
            target_freq_float = norm2freq(self.audio_fb_target[0], alpha, beta)
            target_tone = note(target_freq_float, length, amp=.25, rate=RATE, mode='saw')
            tone = np.repeat(target_tone.reshape((-1, 1)), 2, axis=1).flatten()
            if self.stream.is_stopped():
                self.stream.start_stream()
            self.stream.write(tone.tobytes())
            self.stream.stop_stream()
        log.debug('stimulus end')
        return self.current_stimulus
    def process_result(self, decision, **kwargs):
        '''Play success or fail feedback depending on the decision and the current
        stimulus, then advance to the next stimulus.  The keyword arguments are
        accepted only for compatibility with the other spellers.  Returns True
        once the stimulus list is exhausted, otherwise False.'''
        play_cfg = self.params.paradigms.feedback.play_end_feedback
        if play_cfg.success and ((decision == aux.decision.yes.value and self.current_stimulus == 'up') or
                                 (decision == aux.decision.no.value and self.current_stimulus == 'down')):
            if self.stream.is_stopped():
                self.stream.start_stream()
            self.stream.write(self.sounds['success'])
            self.stream.stop_stream()
        elif play_cfg.fail and ((decision != aux.decision.yes.value and self.current_stimulus == 'up') or
                                (decision != aux.decision.no.value and self.current_stimulus == 'down')):
            if self.stream.is_stopped():
                self.stream.start_stream()
            self.stream.write(self.sounds['fail'])
            self.stream.stop_stream()

        if self.list_of_stimuli:
            self.current_stimulus = self.list_of_stimuli.pop(0)
            return False
        else:
            return True
    def get_current_state(self):
        return 'feedback', self.current_stimulus

    def get_mode(self):
        '''
        Return the mode of the paradigm in use.

        Return:
            mode: string (mode of the paradigm)
        '''
        return self.mode

    def save_log(self):
        '''
        Save the log
        '''
        pass

    def close(self):
        return
    def _read_config(self, file):
        '''
        Read the configuration yaml file

        Parameters:
            file: string (name of the configuration yaml file)

        Return:
            config: munch structure (configuration file)
        '''
        try:
            with open(file) as stream:
                config = munch.fromYAML(stream)
            return config
        except Exception as e:
            raise e
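
The class above reads everything it needs from a munch-style configuration (`params.paradigms.feedback` for the paradigm settings, `params.feedback` for the tone parameters) and from a shared `audio_fb_target` buffer that the decoder also writes to. The snippet below is a minimal usage sketch, not part of feedback.py: the configuration values, wav file names, and the stand-in decoder decision are placeholders chosen only to match the attribute names the class accesses.

import aux
from munch import munchify

from feedback import Feedback

# Hypothetical configuration mirroring the attributes read in __init__() and
# present_stimulus(); all values and file names are placeholders.
params = munchify({
    'paradigms': {'feedback': {
        'audio_path': './audio',
        'number_of_stim': 2,                       # stimuli per non-baseline state
        'mode': {'default': 'neurofeedback'},
        'selected_mode': 'default',
        'states': {'baseline': [0.0], 'up': [1.0], 'down': [0.0]},
        'sounds': {'success': 'success.wav', 'fail': 'fail.wav'},
        'play_end_feedback': {'success': True, 'fail': True},
    }},
    'feedback': {'alpha': 360, 'beta': 80, 'target_tone_length': 1.0},
})

audio_fb_target = [0.0]                  # shared normalised target, also read by the decoder
fb = Feedback(audio_fb_target, params)   # opens the PyAudio output stream

done = False
while not done:
    stimulus = fb.present_stimulus(audio=True)   # play the target tone for the current state
    decision = aux.decision.yes.value            # stand-in for the decoder's output
    done = fb.process_result(decision)           # play success/fail sound, advance the list
fb.close()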