- #!/usr/bin/env python3
- # -*- coding: utf-8 -*-
- """
- Created on Sun Jul 22 17:08:29 2018
- @author: ale, modified by Ioannis Vlachos
- """
import os.path
import subprocess
import wave
from abc import ABC, abstractmethod

import munch
# import pyaudio
import pyttsx3
# import sounddevice as sd
# import soundfile as sf

import aux
from aux import log
class Paradigm(ABC):
    '''
    Abstract base class for speller paradigms.

    Provides shared helpers for text-to-speech (pyttsx3) and wav-file
    playback; concrete paradigms implement the abstract stimulus/state
    methods below.
    '''

    # p = pyaudio.PyAudio()

    # Class-level TTS engine, shared by all paradigm instances.
    engine = pyttsx3.init()
    # engine.setProperty('voice','com.apple.speech.synthesis.voice.anna.premium')
    # Best voice found so far for German; see
    # https://github.com/numediart/MBROLA-voices
    engine.setProperty('voice', 'mb-de6')

    # def __init__(self, pyttsx_rate=100):
    #     # rate = engine.getProperty('rate')-150
    #     self.engine.setProperty('rate',pyttsx_rate)
    #     log.info(f'pyttsx3 rate is: {pyttsx_rate}')

    #     return None

    @abstractmethod
    def __init__(self, *args, **kwargs):
        '''
        Initialize the paradigm parameters.

        Parameters:
            params: munch structure (optional keyword argument; when absent,
                the configuration is loaded via aux.load_config())
        '''
        super().__init__()
        if 'params' in kwargs:
            self.params = kwargs['params']
        else:
            self.params = aux.load_config()

    @abstractmethod
    def process_result(self, decision=None):
        '''
        Process the result based on the initialized paradigm

        Parameters:
            decision: boolean (value of the last decision, default None if it is a training or screening paradigm mode)

        Return:
            finish: boolean (True if the paradigm is finished, False otherwise)
        '''
        # Original stub returned the undefined name `finish` (NameError if
        # ever executed); subclasses must override.
        raise NotImplementedError

    @abstractmethod
    def present_stimulus(self, audio=True):
        '''
        Play auditorly the current stimulus

        Parameters:
            audio: boolean (default True for enabling the audio playback)

        Return:
            stimulus: string (string version of the played stimulus)
        '''
        # Original stub returned the undefined name `stimulus`; subclasses
        # must override.
        raise NotImplementedError

    @abstractmethod
    def get_current_state(self):
        '''
        Return the current state

        Parameters:
            nothing

        Return:
            state: list with two elements
                general_state: string (general state, e.g. current selected string)
                specific_state: string (specific state, e.g. current selection)
        '''
        # Original stub returned two undefined names; subclasses must override.
        raise NotImplementedError

    @abstractmethod
    def get_mode(self):
        '''
        Return the mode of the used paradigm: Screening, Training, Validation or Free

        Parameters:
            nothing

        Return:
            mode: string (paradigm mode: 'Screening', 'Training', 'Validation' or 'Free')
        '''
        raise NotImplementedError

    @abstractmethod
    def save_log(self):
        '''
        Save the log

        Parameters:
            nothing

        Return:
            nothing
        '''
        raise NotImplementedError

    def close(self):
        '''
        Close the speller and all the active processes
        '''
        # print('quit')
        # self.p.terminate()
        return

    def _read_config(self, file):
        '''
        Read the configuration yaml file

        Parameters:
            file: string (name of the configuration yaml file)

        Return:
            config: munch structure (configuration file)

        Raises:
            OSError: if the file cannot be opened
            Exception: whatever munch.fromYAML raises on malformed yaml
        '''
        # No try/except needed: the original re-raised unchanged, which is
        # exactly what happens without the handler.
        with open(file) as stream:
            return munch.fromYAML(stream)

    def _play_audio(self, wav_file, start_trigger, end_trigger):
        '''
        Play the wav file using wave and pyaudio

        Parameters:
            wav_file: string (name of the wav file)
            start_trigger: int (trigger of the start of the sound)
            end_trigger: int (trigger of the end of the sound)
        '''
        # NOTE(review): self.p is never initialized in this class (the
        # pyaudio setup above is commented out) — calling this method will
        # raise AttributeError until `p = pyaudio.PyAudio()` is restored.
        CHUNK = 1024
        wf = wave.open(wav_file, 'rb')
        stream = self.p.open(format=self.p.get_format_from_width(wf.getsampwidth()),
                             channels=wf.getnchannels(),
                             rate=wf.getframerate(),
                             output=True)

        self._event_trigger(start_trigger)

        # Stream the file in CHUNK-sized frames until exhausted.
        data = wf.readframes(CHUNK)
        while data:
            stream.write(data)
            data = wf.readframes(CHUNK)
        stream.stop_stream()

        self._event_trigger(end_trigger)

        stream.close()

    def _play_sound(self, wav_file, start_trigger, end_trigger):
        '''
        Play the wav file using aplay

        Parameters:
            wav_file: string (name of the wav file)
            start_trigger: int (trigger of the start of the sound)
            end_trigger: int (trigger of the end of the sound)

        Raises:
            FileNotFoundError: if wav_file does not exist
        '''
        # Raise the correct exception early if the file is missing.
        os.path.getsize(wav_file)
        # data,fs = sf.read(wav_file)
        self._event_trigger(start_trigger)
        # sd.play(data,fs,blocking=True)
        # Argument list instead of a shell string: safe for paths with
        # spaces/metacharacters. Exit status is ignored, matching the
        # original os.system() behavior.
        subprocess.run(['aplay', '-q', wav_file], check=False)
        self._event_trigger(end_trigger)

    def _say(self, text, start_trigger, end_trigger):
        '''
        Convert text to speech and play it using pyttsx3

        Parameters:
            text: string or list[string] (text to convert and play)
            start_trigger: int (trigger of the start of the sound)
            end_trigger: int (trigger of the end of the sound)
        '''
        def onStart(name):
            return self._event_trigger(start_trigger)

        def onEnd(name, completed):
            return self._event_trigger(end_trigger)

        token_start = self.engine.connect('started-utterance', onStart)
        token_end = self.engine.connect('finished-utterance', onEnd)

        # Bare '.' entries are skipped (nothing useful to vocalize).
        if isinstance(text, list):
            for sentence in text:
                if sentence != '.':
                    self.engine.say(sentence)
        else:
            if text != '.':
                self.engine.say(text)
        self.engine.runAndWait()

        # Detach the per-call callbacks so triggers don't fire for later
        # utterances with stale trigger numbers.
        self.engine.disconnect(token_start)
        self.engine.disconnect(token_end)

    def _event_trigger(self, number):
        '''
        Send a trigger for an event

        Parameters:
            number: integer (number to send as trigger)
        '''
        # Placeholder: trigger transport not implemented here.
        # print(number)
        return

    def __del__(self):
        log.debug('speller destructed')