fixed audio

Saifeddine ALOUI 2023-08-22 03:43:17 +02:00
parent a740fe6252
commit 40c6cdb191
3 changed files with 32 additions and 19 deletions


@@ -20,7 +20,7 @@ from lollms.binding import LOLLMSConfig, BindingBuilder, LLMBinding, ModelBuilde
 from lollms.paths import LollmsPaths
 from lollms.helpers import ASCIIColors, trace_exception
 from lollms.app import LollmsApplication
-from lollms.utilities import File64BitsManager
+from lollms.utilities import File64BitsManager, PromptReshaper
 import multiprocessing as mp
 import threading
 import time
@@ -452,14 +452,16 @@ class LoLLMsAPPI(LollmsApplication):
             try:
                 if not self.personality.processor is None:
-                    self.personality.processor.add_file(save_path, partial(self.process_chunk, client_id = request.sid))
                     file.save(save_path)
+                    self.personality.processor.add_file(save_path, partial(self.process_chunk, client_id = request.sid))
                     # File saved successfully
                     socketio.emit('progress', {'status':True, 'progress': 100})
                 else:
                     # Personality doesn't support file sending
                     socketio.emit('progress', {'status':False, 'error': "Personality doesn't support file sending"})
+                    file.save(save_path)
+                    self.personality.add_file(save_path, partial(self.process_chunk, client_id = request.sid))
                     # File saved successfully
                     socketio.emit('progress', {'status':True, 'progress': 100})
             except Exception as e:
                 # Error occurred while saving the file
                 socketio.emit('progress', {'status':False, 'error': str(e)})
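Note on the hunk above: the upload path now writes the file to disk before handing its path to the personality, and when the personality has no processor the file is registered directly on the personality itself. A minimal standalone sketch of that save-then-register ordering (hypothetical helper, not the repo's handler):

from pathlib import Path

# Hypothetical stand-in for the handler logic in the hunk above (illustrative only).
def register_upload(personality, uploaded_file, save_path: Path, on_chunk):
    # Write the upload to disk first, then let the personality (or its processor) index it.
    uploaded_file.save(save_path)
    if personality.processor is not None:
        personality.processor.add_file(save_path, on_chunk)
    else:
        # Fallback: attach the file to the personality itself.
        personality.add_file(save_path, on_chunk)
    return {"status": True, "progress": 100}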
@@ -1185,6 +1187,9 @@ class LoLLMsAPPI(LollmsApplication):
print("Finished executing the workflow")
return
if len(self.personality.files)>0:
PromptReshaper("Documentation:{{doc}}\n{{Content}}")
self._generate(full_prompt, n_predict, client_id, callback)
ASCIIColors.success("\nFinished executing the generation")
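Note on the new PromptReshaper import: the hunk above builds a PromptReshaper from a "Documentation:{{doc}}\n{{Content}}" template when the personality has attached files, which suggests a placeholder-filling step that injects retrieved document text into the prompt before generation. A minimal standalone sketch of that idea (not the lollms.utilities API; the class and method names below are illustrative):

# Illustrative template filler; the real lollms.utilities.PromptReshaper may differ.
class SimplePromptReshaper:
    def __init__(self, template: str):
        # The template uses {{name}} placeholders, e.g. "Documentation:{{doc}}\n{{Content}}".
        self.template = template

    def replace(self, placeholders: dict) -> str:
        # Fill each {{name}} with its value; unknown placeholders are left untouched.
        text = self.template
        for name, value in placeholders.items():
            text = text.replace("{{" + name + "}}", value)
        return text

# Hypothetical usage mirroring the hunk above.
reshaper = SimplePromptReshaper("Documentation:{{doc}}\n{{Content}}")
full_prompt = reshaper.replace({
    "doc": "...text extracted from the attached files...",
    "Content": "...the original prompt...",
})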

app.py

@@ -1509,7 +1509,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
         data = request.get_json()
         id = data['id']
         print(f"- Selecting active personality {id} ...",end="")
-        if id<len(self.config["personalities"]):
+        if id<len(self.config["personalities"]) and id<len(self.mounted_personalities):
             self.config["active_personality_id"]=id
             self.personality = self.mounted_personalities[self.config["active_personality_id"]]
             self.apply_settings()
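Note on the app.py change: the extra condition keeps the selection from indexing past self.mounted_personalities when the config lists more personalities than were actually mounted (for example if a personality failed to mount), which would otherwise raise an IndexError. A small illustrative check of the same shape (hypothetical helper, not code from the repo):

# Hypothetical guard: the id must be valid for both the configured and the mounted lists.
def can_select_personality(id: int, configured: list, mounted: list) -> bool:
    return 0 <= id < len(configured) and id < len(mounted)

assert can_select_personality(1, ["a", "b", "c"], ["a", "b"])       # mounted, selectable
assert not can_select_personality(2, ["a", "b", "c"], ["a", "b"])   # configured but not mounted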


@@ -302,22 +302,30 @@ export default {
// Function to speak a chunk of text
const speakChunk = () => {
const endIndex = findLastSentenceIndex(startIndex);
const chunk = this.message.content.substring(startIndex, endIndex);
this.msg.text = chunk;
startIndex = endIndex + 1;
this.msg.onend = (event) => {
if (startIndex < this.message.content.length-2) {
// Use setTimeout to add a brief delay before speaking the next chunk
setTimeout(() => {
if (this.message.content.includes('.')){
const endIndex = findLastSentenceIndex(startIndex);
const chunk = this.message.content.substring(startIndex, endIndex);
this.msg.text = chunk;
startIndex = endIndex + 1;
this.msg.onend = (event) => {
if (startIndex < this.message.content.length-2) {
// Use setTimeout to add a brief delay before speaking the next chunk
setTimeout(() => {
speakChunk();
}, 1); // Adjust the delay as needed
} else {
this.isSpeaking = false;
console.log("voice off :",this.message.content.length," ",endIndex)
}
};
this.speechSynthesis.speak(this.msg);
}
else{
setTimeout(() => {
speakChunk();
}, 1); // Adjust the delay as needed
} else {
this.isSpeaking = false;
console.log("voice off :",this.message.content.length," ",endIndex)
}
};
this.speechSynthesis.speak(this.msg);
}
};
// Speak the first chunk
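Note on the Vue change: speakChunk now only pushes a chunk to speechSynthesis when the streamed message already contains a sentence terminator; otherwise it waits briefly (setTimeout) and retries, so speech does not start on a fragment with no sentence boundary. The chunking idea itself (cut at the last sentence boundary inside a window, speak it, continue from there) is sketched below in Python for illustration; the window size and terminator set are assumptions, and this is not the component's code:

# Illustrative sentence-boundary chunking for TTS (window size is an assumption).
def split_into_speakable_chunks(text: str, chunk_size: int = 200) -> list:
    chunks = []
    start = 0
    while start < len(text):
        window = text[start:start + chunk_size]
        # Cut at the last sentence terminator inside the window.
        cut = max(window.rfind("."), window.rfind("!"), window.rfind("?"))
        if cut == -1:
            # No terminator yet: take the whole window (the component instead waits
            # and retries, since the message may still be streaming).
            cut = len(window) - 1
        chunks.append(text[start:start + cut + 1])
        start += cut + 1
    return chunks

print(split_into_speakable_chunks("First sentence. Second one! A trailing fragment"))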