diff --git a/hugvey/speech/recorder.py b/hugvey/speech/recorder.py
index 97695ac..1d7eda9 100644
--- a/hugvey/speech/recorder.py
+++ b/hugvey/speech/recorder.py
@@ -50,25 +50,23 @@ class Recorder:
 
     def writeData(self):
-        if len(self.data) < 1:
-            self.logger.info("Skip empty wave creation")
-            return
-
-        if self.record_voice:
-            self.fragmentNr += 1
+        if len(self.data) < 1:
+            self.logger.info("Skip empty wave creation")
+        else:
+            self.fragmentNr += 1
 
-            fn = os.path.join(self.out_folder, f"{self.fragmentNr}.wav")
+            fn = os.path.join(self.out_folder, f"{self.fragmentNr}.wav")
 
-            self.logger.info(f"Write wave: {fn}")
+            self.logger.info(f"Write wave: {fn}")
 
-            self.wf = wave.open(fn, 'wb')
-            self.wf.setnchannels(1)
-            self.wf.setsampwidth(2)
-            self.wf.setframerate(self.src_rate)
-            # adapted from https://stackoverflow.com/questions/892199/detect-record-audio-in-python#6743593
-            self.wf.writeframes(pack('<' + ('h'*len(self.data)), *self.data))
-            self.wf.close()
+            self.wf = wave.open(fn, 'wb')
+            self.wf.setnchannels(1)
+            self.wf.setsampwidth(2)
+            self.wf.setframerate(self.src_rate)
+            # adapted from https://stackoverflow.com/questions/892199/detect-record-audio-in-python#6743593
+            self.wf.writeframes(pack('<' + ('h'*len(self.data)), *self.data))
+            self.wf.close()
 
         with open(os.path.join(self.out_folder, "transcriptions.txt"), "a") as fp:
             fp.write(f"{self.fragmentNr}\t{self.currentTranscription}\n")
@@ -79,9 +77,6 @@ class Recorder:
         self.currentTranscription = ""
 
     def receive(self, chunk):
-        if not self.record_voice:
-            return
-
         if not self.running:
             return
 
@@ -100,6 +95,12 @@ class Recorder:
         else:
             self.subsequentMutedFrames = 0
 
+
+        if not self.record_voice:
+            # we only check this here, as the writeData above is used to write
+            # transcriptions to the log.
+            return
+
         d = array('h', chunk)
         self.data.extend(d)
         # self.wf.writeframes(chunk)
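
For context, a minimal sketch of how the two patched methods read after this change, reconstructed only from the hunks above; imports (os, wave, array, struct.pack) and attributes such as self.data, self.record_voice, self.src_rate and self.logger are assumed to be defined elsewhere in recorder.py, and everything the diff does not show is elided:

    class Recorder:
        def writeData(self):
            # An empty buffer no longer returns early, so the transcription
            # log at the bottom is appended to even when nothing was captured.
            if len(self.data) < 1:
                self.logger.info("Skip empty wave creation")
            else:
                self.fragmentNr += 1
                fn = os.path.join(self.out_folder, f"{self.fragmentNr}.wav")
                self.logger.info(f"Write wave: {fn}")

                self.wf = wave.open(fn, 'wb')
                self.wf.setnchannels(1)       # mono
                self.wf.setsampwidth(2)       # 16-bit samples
                self.wf.setframerate(self.src_rate)
                self.wf.writeframes(pack('<' + ('h' * len(self.data)), *self.data))
                self.wf.close()

            with open(os.path.join(self.out_folder, "transcriptions.txt"), "a") as fp:
                fp.write(f"{self.fragmentNr}\t{self.currentTranscription}\n")

        def receive(self, chunk):
            if not self.running:
                return

            ...  # silence/mute detection (unchanged by this patch)

            if not self.record_voice:
                # Checked only here, so writeData() still logs transcriptions
                # even when no audio is being recorded.
                return

            d = array('h', chunk)
            self.data.extend(d)

Because the record_voice guard now sits after the muted-frame bookkeeping, that state keeps updating when recording is disabled; only buffering of the raw audio (and hence the .wav output) is skipped.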