I'm trying to launch a Choregraphe app (which is not mine) and I ran into an issue. I'm following the steps from GitHub: https://github.com/conema/Choregraphe-GA?tab=readme-ov-file. I already configured the server and it works well, but when I launch the app I get the error below.
Basically, this app is supposed to make your robot act like Google Assistant.
The 'speech recognition' box works well, but it leads directly to the 'error' box after entering the 'GA' box, and I really don't understand why.
I've included the GA code and some images below.
(I'm new to using Choregraphe, but I really need to make this app work.) Any help is appreciated.
[enter image description here](https://i.stack.imgur.com/mWYXM.png)
import waveimport timeimport socketimport reimport jsonimport sysimport threadingfrom subprocess import Popen, PIPEfrom threading import Threadfrom Queue import Queue, Empty# TCP clientTCP_IP = '127.0.0.1'TCP_PORT = 4000BUFFER_SIZE = 512# GA answersEOU = "END_OF_UTTERANCE"DFO = "DIALOG_FOLLOW_ON"# Recording comcommand = ['arecord','--format', 'S16_LE','--rate', '16000','--file-type', 'raw']ON_POSIX = 'posix' in sys.builtin_module_namesdef enqueue_output(out, queue): t = threading.currentThread() for line in iter(out.readline, b''): if getattr(t, "stop_t", False): # Stop thread break queue.put(line) out.close()def connect_ga(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((TCP_IP, TCP_PORT)) return sdef start_recording(): p = Popen(command, stdout=PIPE, bufsize=1, close_fds=ON_POSIX) q = Queue() t = Thread(target=enqueue_output, args=(p.stdout, q)) t.daemon = True t.start() return q, tdef remove_emoji(text): emoji_pattern = re.compile("[" u"\U0001F600-\U0001F64F" # emoticons u"\U0001F300-\U0001F5FF" # symbols & pictographs u"\U0001F680-\U0001F6FF" # transport & map symbols u"\U0001F1E0-\U0001F1FF" # flags (iOS)"]+", flags=re.UNICODE) return emoji_pattern.sub(r'', text)class MyClass(GeneratedClass): def __init__(self): GeneratedClass.__init__(self) self.tts = ALProxy('ALTextToSpeech') self.soundPeak = False def onLoad(self): #put initialization code here pass def onUnload(self): #put clean-up code here self.s.close() self.t.stop_t = True self.t.join() pass def onInput_audioTrigger(self): self.soundPeak = True def start_zora(self): self.isEOU = False self.eu = "" self.q, self.t = start_recording() self.logger.info("Recording...") self.s = connect_ga() self.logger.info("Connected to GA") self.onListening() def onInput_onStart(self): while not self.soundPeak: pass self.start_zora() #audioList = [] while True: if(not self.isEOU): #Send audio try: #Get mic data from thread data = self.q.get_nowait() except Empty: #If we have no new data, try again pass 
else: #We have data, send it #audioList.append(data) self.s.send(data) self.eu += self.s.recv(BUFFER_SIZE) if (self.eu is None): self.logger.info("Wrong answer from server") break if("0END_OF_UTTERANCE" in self.eu): self.isEOU = True self.logger.info("END_OF_UTTERANCE") while True: self.eu += self.s.recv(BUFFER_SIZE) #No more data from server if("STOP_JSON" in self.eu): break self.logger.info("finished receiving") #Stop thread and wait for exit self.t.stop_t = True self.t.join() self.logger.info("finished recording") else: #Analyse answer from google''' #[DEBUG] write audio file = wave.open('c:Users/dongm/OneDrive/Bureau/Nao/GA-Server/test/whoisobama.wav', 'wb') file.setnchannels(1) file.setframerate(16000) file.setsampwidth(2) file.writeframes(b''.join(audioList)) file.close()''' self.onUnderstood() data = re.search('START_JSON(.*)STOP_JSON', self.eu) if data: response = json.loads(data.group(1)) del self.eu if "text" in response: #We have a text answer from GA speach = remove_emoji(response["text"]) speach = speach.encode('utf-8') speach = re.sub(r'\.\n.*\n*\( +.+\)\.*', '', speach) #remove link from supplemental display text speach = re.sub(r'-{3}', '', speach) #remove --- speach = re.sub(r'\(http.*\)', '', speach) #remove \\nWikipedia (http...) speach = re.sub(r'\\n.*', '', speach) #remove \\nWikipedia(http....) 
transcript = unicode(response["transcript"]) #the input audio transcript microphone_mode = str(response["microphone_mode"]) #CLOSE_MICROPHONE or DIALOG_FOLLOW_ON conversation_state = response["conversation_state"] #state information for the subsequent audio #self.logger.info(speach) self.tts.say(speach) self.s.close() self.goReco() if microphone_mode != DFO: #If GA dont't want an answer from the user self.soundPeak = False while not self.soundPeak: pass self.start_zora() else: self.logger.info("Error/No answer") #break self.onError() self.goReco() self.soundPeak = False while not self.soundPeak: pass self.start_zora() self.logger.info("finished GA") self.onStopped() pass def onInput_onStop(self): self.onUnload() #it is recommended to reuse the clean-up as the box is stopped self.onStopped() #activate the output of the boxI fixed all the parameters well but it doesn't want to work.