First working audio in and audio out

Saifeddine ALOUI 2023-04-14 02:10:22 +02:00
parent 8a38b44b88
commit f5fdf189d2
8 changed files with 265 additions and 1 deletion

17
app.py
View File

@@ -61,6 +61,10 @@ class Gpt4AllWebUI:
            "/list_personalities", "list_personalities", self.list_personalities, methods=["GET"]
        )
        self.add_endpoint(
            "/list_languages", "list_languages", self.list_languages, methods=["GET"]
        )
        self.add_endpoint(
            "/list_discussions", "list_discussions", self.list_discussions, methods=["GET"]
        )
@@ -138,6 +142,19 @@ class Gpt4AllWebUI:
        personalities = [f.name for f in personalities_dir.glob('*.yaml')]
        return jsonify(personalities)

    def list_languages(self):
        languages = [
            { "value": "en-US", "label": "English" },
            { "value": "fr-FR", "label": "Français" },
            { "value": "ar-AR", "label": "العربية" },
            { "value": "it-IT", "label": "Italiano" },
            { "value": "de-DE", "label": "Deutsch" },
            { "value": "nl-XX", "label": "Dutch" },
            { "value": "zh-CN", "label": "中文" }
        ]
        return jsonify(languages)

    def list_discussions(self):
        discussions = self.db.get_discussions()
        return jsonify(discussions)
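
The new endpoint serves the language list as a JSON array of { value, label } pairs; a minimal client-side sketch of consuming it (settings.js further down does essentially this):

// Illustrative only, not part of this commit.
fetch('/list_languages')
    .then((response) => response.json())
    .then((languages) => {
        languages.forEach((lang) => console.log(lang.value, lang.label));
    });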

View File

@@ -12,4 +12,5 @@ host: "localhost"
port: 9600
db_path: "database.db"
nb_messages_to_remember: 5
personality: "gpt4all_chatbot"
language: "en_XX"

202
static/js/audio.js Normal file
View File

@@ -0,0 +1,202 @@
let isStarted = false;
let isSpeaking = false;

// Web Speech API handles (Chrome still exposes the prefixed recognition constructor)
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
const recognition = new SpeechRecognition();
const synth = window.speechSynthesis || window.webkitSpeechSynthesis;
var voices = synth.getVoices();

function prepre_audio() {
    recognition.continuous = true;
    recognition.interimResults = true;
    recognition.maxAlternatives = 10;
    language_select = document.getElementById("language"); // shared with the functions below
}
voices = [];
function populateVoicesList() {
    voices = synth.getVoices();
    voice_select = document.getElementById("voice");
    voice_select.innerHTML = "";
    for (let i = 0; i < voices.length; i++) {
        if (voices[i].lang.startsWith(language_select.value.substring(0, 2))) {
            const option = document.createElement("option");
            option.textContent = `${voices[i].name} (${voices[i].lang})`;
            if (voices[i].default) {
                option.textContent += " — DEFAULT";
            }
            option.setAttribute("data-lang", voices[i].lang);
            option.setAttribute("data-name", voices[i].name);
            voice_select.appendChild(option);
        }
    }
    voice_select.addEventListener("change", function () {
    });
}
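
One caveat worth noting: getVoices() can return an empty array before the browser has finished loading its voices, and the filtered list goes stale when the user switches language. A hedged sketch of a possible hook, not present in this commit:

// Assumption: not part of this commit.
// Rebuild the voice list once voices are available and whenever the language changes.
synth.addEventListener("voiceschanged", populateVoicesList);
document.getElementById("language").addEventListener("change", populateVoicesList);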
// Audio code
// Split a long string into chunks of at most maxLength characters, cutting on
// sentence boundaries so each chunk can be spoken as a separate utterance.
function splitString(string, maxLength) {
    const sentences = string.match(/[^.!?]+[.!?]/g);
    const strings = [];
    let currentString = "";
    if (sentences) {
        for (const sentence of sentences) {
            if (currentString.length + sentence.length > maxLength) {
                strings.push(currentString);
                currentString = "";
            }
            currentString += `${sentence} `;
        }
    } else {
        strings.push(string);
    }
    if (currentString) {
        strings.push(currentString);
    }
    return strings;
}
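
A quick usage sketch (illustrative values, not part of the commit) showing how a long reply gets chunked before being handed to speechSynthesis:

// Illustrative only: sample text and length are made up.
const chunks = splitString(
    "First sentence. Second sentence! Third one? A trailing fragment",
    40
);
// Each chunk ends on a sentence boundary near the 40-character cap.
// A trailing fragment without ./!/? is dropped by the regex, while a string
// with no terminator at all is pushed whole by the else branch.
console.log(chunks);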
function addListeners(button, utterThis) {
    utterThis.onstart = (event) => {
        isSpeaking = true;
        button.style.backgroundColor = "red";
        button.style.boxShadow = "2px 2px 0.5px #808080";
    };
    utterThis.onend = (event) => {
        isSpeaking = false;
        button.style.backgroundColor = "";
        button.style.boxShadow = "";
    };
}
function attachAudio_modules(div) {
    // Only add one play button per message
    if (div.parentNode.getElementsByClassName("audio-out-button").length > 0) {
        return;
    }
    const audio_out_button = document.createElement("button");
    audio_out_button.id = "audio-out-button";
    audio_out_button.classList.add("audio_btn");
    audio_out_button.innerHTML = "🕪";
    div.classList.add("flex-1");
    audio_out_button.classList.add("audio-out-button");
    div.appendChild(audio_out_button);

    function play_audio() {
        if (isSpeaking) {
            // Already speaking: stop playback and reset the button
            audio_out_button.style.backgroundColor = "";
            audio_out_button.style.boxShadow = "";
            synth.cancel();
            isSpeaking = false;
        } else {
            isSpeaking = true;
            const text = audio_out_button.previousSibling.textContent;
            const selectedOption =
                voice_select.selectedOptions[0].getAttribute("data-name");
            var selectedVoice = null;
            for (let i = 0; i < voices.length; i++) {
                if (voices[i].name === selectedOption) {
                    selectedVoice = voices[i];
                }
            }
            if (selectedVoice && selectedVoice.voiceURI === "native") {
                const utterThis = new SpeechSynthesisUtterance(text);
                utterThis.voice = selectedVoice;
                addListeners(audio_out_button, utterThis);
                synth.speak(utterThis);
            } else {
                // Speak long text in chunks of at most 200 characters
                const texts = splitString(text, 200);
                texts.forEach((text) => {
                    const utterThis = new SpeechSynthesisUtterance(text);
                    utterThis.voice = selectedVoice;
                    addListeners(audio_out_button, utterThis);
                    synth.speak(utterThis);
                });
            }
        }
    }

    audio_out_button.addEventListener("click", () => {
        play_audio();
    });
    // TODO : activate using configuration file
    //if (global["auto_audio"]) {
    //    play_audio();
    //}
}
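
For context, attachAudio_modules expects the element that holds the message text (chat.js below passes messageTextElement). A minimal, self-contained sketch; the "#chat-window" id and the <p> markup are assumptions, not identifiers from this commit:

// Sketch only: container id and message markup are made up.
const container = document.querySelector("#chat-window");
const messageText = document.createElement("p");
messageText.textContent = "Hello from the bot.";
container.appendChild(messageText);
attachAudio_modules(messageText); // appends the 🕪 button after the text node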
function add_audio_in_ui() {
    const inputs = document.querySelectorAll("#user-input");
    inputs.forEach((input) => {
        // const wrapper = document.createElement("div");
        // wrapper.classList.add("flex", "items-center");
        var btn = document.querySelectorAll("#audio_in_tool");
        var found = false;
        // Iterate through the existing microphone buttons
        for (var i = 0; i < btn.length; i++) {
            var child = btn[i];
            // Check if this input's wrapper already contains a microphone button
            if (input.parentNode.parentNode.contains(child)) {
                found = true;
            }
        }
        if (!found) {
            const audio_in_button = document.createElement("button");
            audio_in_button.id = "audio_in_tool";
            audio_in_button.classList.add("audio_btn");
            audio_in_button.innerHTML = "🎤";
            input.parentNode.insertBefore(audio_in_button, input);
            input.classList.add("flex-1");
            audio_in_button.classList.add("ml-2");
            //wrapper.appendChild(audio_in_button);
            //input.parentNode.parentNode.insertBefore(wrapper, input);
            //input.parentNode.removeChild(input);
            //wrapper.appendChild(input);
            // Toggle speech recognition on click
            audio_in_button.addEventListener("click", () => {
                if (isStarted) {
                    recognition.stop();
                    isStarted = false;
                } else {
                    recognition.lang = language_select.value;
                    recognition.start();
                    isStarted = true;
                }
            });
            // Dump the running transcript into the text input
            recognition.addEventListener("result", (event) => {
                let transcript = "";
                for (const result of event.results) {
                    transcript += result[0].transcript;
                }
                if (transcript != "") {
                    input.value = transcript;
                }
            });
            recognition.addEventListener("start", () => {
                audio_in_button.style.backgroundColor = "red";
                audio_in_button.style.boxShadow = "2px 2px 0.5px #808080";
            });
            recognition.addEventListener("end", () => {
                audio_in_button.style.backgroundColor = "";
                audio_in_button.style.boxShadow = "";
            });
        }
    });
}
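
Because interimResults is enabled in prepre_audio, the handler above rewrites input.value on every interim update. An alternative sketch, not in this commit, that keeps only results the engine has marked final:

// Alternative sketch (not part of this commit).
recognition.addEventListener("result", (event) => {
    let finalTranscript = "";
    for (const result of event.results) {
        if (result.isFinal) {
            finalTranscript += result[0].transcript;
        }
    }
    console.log("final so far:", finalTranscript);
});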

View File

@@ -319,6 +319,8 @@ function addMessage(sender, message, id, rank=0, can_edit=false) {
    }
    chatWindow.appendChild(messageElement);
    chatWindow.appendChild(hiddenElement);
    attachAudio_modules(messageTextElement);

    // scroll to bottom of chat window
    chatWindow.scrollTop = chatWindow.scrollHeight;

View File

@@ -5,6 +5,8 @@ fetch('/settings')
        document.getElementById('settings').innerHTML = html;
        modelInput = document.getElementById('model');
        personalityInput = document.getElementById('personalities');
        languageInput = document.getElementById('language');
        seedInput = document.getElementById('seed');
        tempInput = document.getElementById('temp');
        nPredictInput = document.getElementById('n-predict');
@@ -52,6 +54,8 @@ fetch('/settings')
    .then((data) => {
        console.log(data);
        modelInput.value = data["model"]
        personalityInput.value = data["personality"]
        languageInput.value = data["language"]
        seedInput.value = data["seed"]
        tempInput.value = data["temp"]
        nPredictInput.value = data["n_predict"]
@@ -168,6 +172,30 @@ function populate_models(){
        }
    });

    // Fetch the list of supported languages from the backend
    fetch('/list_languages')
        .then(response => response.json())
        .then(data => {
            if (Array.isArray(data)) {
                // data is an array of { value, label } entries
                const selectElement = document.getElementById('language');
                data.forEach(row => {
                    const optionElement = document.createElement('option');
                    optionElement.value = row.value;
                    optionElement.innerHTML = row.label;
                    selectElement.appendChild(optionElement);
                });
                // fetch('/get_args')
                //     .then(response=> response.json())
                //     .then(data=>{
                //     })
            } else {
                console.error('Expected an array, but received:', data);
            }
        });
}
populate_models()

View File

@@ -32,6 +32,9 @@ fetch('/main')
        load_discussion();
        update_main();
        db_export();
        prepre_audio();
        add_audio_in_ui();
        populateVoicesList();
    })
    .catch(error => {
.catch(error => {

View File

@@ -55,6 +55,7 @@
    <script src="{{ url_for('static', filename='js/settings.js') }}"></script>
    <script src="{{ url_for('static', filename='js/db_export.js') }}"></script>
    <script src="{{ url_for('static', filename='js/audio.js') }}"></script>
    <script src="{{ url_for('static', filename='js/tabs.js') }}"></script>
</body>

View File

@@ -10,6 +10,16 @@
    <select class="bg-gray-700 shadow appearance-none border rounded w-full py-2 px-3 leading-tight focus:outline-none focus:shadow-outline" id="personalities" name="personalities" value="gpt4all_chatbot.yaml">
    </select>
</div>
<div class="mb-4">
    <label class="block font-bold mb-2" for="language">Language</label>
    <select class="bg-gray-700 shadow appearance-none border rounded w-full py-2 px-3 leading-tight focus:outline-none focus:shadow-outline" id="language" name="language">
    </select>
</div>
<div class="mb-4">
    <label class="block font-bold mb-2" for="voice">Voice</label>
    <select class="bg-gray-700 shadow appearance-none border rounded w-full py-2 px-3 leading-tight focus:outline-none focus:shadow-outline" id="voice" name="voice">
    </select>
</div>
<div class="mb-4">
    <label class="block font-bold mb-2" for="seed">Seed</label>