import whisper
import torch
import wave
import os
import threading
from tempfile import NamedTemporaryFile
import custom_speech_recognition as sr
import io
from datetime import timedelta
import pyaudiowpatch as pyaudio
from heapq import merge
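
# Seconds of silence after which the next audio chunk is treated as a new phrase.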
PHRASE_TIMEOUT = 3.05
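
# Maximum number of phrases retained per source and returned by get_transcript().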
MAX_PHRASES = 10
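

# Consumes (who_spoke, data, time_spoken) tuples from an audio queue, groups the raw
# audio into phrases per source ("You" = microphone, "Speaker" = loopback audio),
# and keeps a rolling transcript produced by the supplied transcription model.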
class AudioTranscriber:
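    # mic_source and speaker_source must expose SAMPLE_RATE, SAMPLE_WIDTH and
    # channels; model must provide get_transcription(wav_path) returning the text.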
    def __init__(self, mic_source, speaker_source, model):
        self.transcript_data = {"You": [], "Speaker": []}
        self.transcript_changed_event = threading.Event()
        self.audio_model = model
        self.audio_sources = {
            "You": {
                "sample_rate": mic_source.SAMPLE_RATE,
                "sample_width": mic_source.SAMPLE_WIDTH,
                "channels": mic_source.channels,
                "last_sample": bytes(),
                "last_spoken": None,
                "new_phrase": True,
                "process_data_func": self.process_mic_data
            },
            "Speaker": {
                "sample_rate": speaker_source.SAMPLE_RATE,
                "sample_width": speaker_source.SAMPLE_WIDTH,
                "channels": speaker_source.channels,
                "last_sample": bytes(),
                "last_spoken": None,
                "new_phrase": True,
                "process_data_func": self.process_speaker_data
            }
        }
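
    # Worker loop: pull audio chunks off the queue, write the current phrase to a
    # temporary WAV file, transcribe it, and update the transcript. Intended to run
    # on a dedicated thread; transcript_changed_event is set whenever new text arrives.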
    def transcribe_audio_queue(self, audio_queue):
        while True:
            who_spoke, data, time_spoken = audio_queue.get()
            self.update_last_sample_and_phrase_status(who_spoke, data, time_spoken)
            source_info = self.audio_sources[who_spoke]

            text = ''
            try:
                temp_file = NamedTemporaryFile(delete=False, suffix=".wav")
            except Exception as e:
                print(e)
                continue  # skip this chunk if the temporary file could not be created
            temp_file.close()

            source_info["process_data_func"](source_info["last_sample"], temp_file.name)
            text = self.audio_model.get_transcription(temp_file.name)

            os.unlink(temp_file.name)

            if text != '' and text.lower() != 'you':
                self.update_transcript(who_spoke, text, time_spoken)
                self.transcript_changed_event.set()
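
    # Append the new chunk to the source's current phrase, or start a fresh phrase
    # if more than PHRASE_TIMEOUT seconds have passed since that source last spoke.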
    def update_last_sample_and_phrase_status(self, who_spoke, data, time_spoken):
        source_info = self.audio_sources[who_spoke]
        if source_info["last_spoken"] and time_spoken - source_info["last_spoken"] > timedelta(seconds=PHRASE_TIMEOUT):
            source_info["last_sample"] = bytes()
            source_info["new_phrase"] = True
        else:
            source_info["new_phrase"] = False

        source_info["last_sample"] += data
        source_info["last_spoken"] = time_spoken
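
    # Wrap raw microphone bytes in an AudioData object and write them to a WAV file.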
    def process_mic_data(self, data, temp_file_name):
        audio_data = sr.AudioData(data, self.audio_sources["You"]["sample_rate"], self.audio_sources["You"]["sample_width"])
        wav_data = io.BytesIO(audio_data.get_wav_data())
        with open(temp_file_name, 'w+b') as f:
            f.write(wav_data.read())
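
    # Write raw loopback (speaker) audio to a WAV file using the speaker stream's
    # channel count and sample rate, with 16-bit samples.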
    def process_speaker_data(self, data, temp_file_name):
        with wave.open(temp_file_name, 'wb') as wf:
            wf.setnchannels(self.audio_sources["Speaker"]["channels"])
            p = pyaudio.PyAudio()
            wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
            wf.setframerate(self.audio_sources["Speaker"]["sample_rate"])
            wf.writeframes(data)
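
    # Prepend a new phrase to the source's transcript, or keep replacing the most
    # recent entry while the same phrase is still being extended.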
    def update_transcript(self, who_spoke, text, time_spoken):
        source_info = self.audio_sources[who_spoke]
        transcript = self.transcript_data[who_spoke]

        if source_info["new_phrase"] or len(transcript) == 0:
            if len(transcript) > MAX_PHRASES:
                transcript.pop(-1)
            transcript.insert(0, (f"{who_spoke}: [{text}]\n\n", time_spoken))
        else:
            transcript[0] = (f"{who_spoke}: [{text}]\n\n", time_spoken)
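
    # Merge both sources' phrases newest-first and return them as a single string.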
    def get_transcript(self):
        combined_transcript = list(merge(
            self.transcript_data["You"], self.transcript_data["Speaker"],
            key=lambda x: x[1], reverse=True))
        combined_transcript = combined_transcript[:MAX_PHRASES]
        return "".join([t[0] for t in combined_transcript])
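
    # Reset the transcripts and the per-source phrase state for both sources.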
    def clear_transcript_data(self):
        self.transcript_data["You"].clear()
        self.transcript_data["Speaker"].clear()

        self.audio_sources["You"]["last_sample"] = bytes()
        self.audio_sources["Speaker"]["last_sample"] = bytes()

        self.audio_sources["You"]["new_phrase"] = True
        self.audio_sources["Speaker"]["new_phrase"] = True
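
# Illustrative usage (a sketch, not part of this module): the source and model objects
# are assumed to come from the caller; any objects exposing the attributes noted on
# __init__ above will do.
#
#   import queue
#   audio_queue = queue.Queue()  # producers put (who_spoke, data, time_spoken) tuples
#   transcriber = AudioTranscriber(mic_source, speaker_source, model)
#   threading.Thread(target=transcriber.transcribe_audio_queue,
#                    args=(audio_queue,), daemon=True).start()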