diff --git a/api/__init__.py b/api/__init__.py
index f604beb4..7585f6a8 100644
--- a/api/__init__.py
+++ b/api/__init__.py
@@ -20,7 +20,7 @@ from lollms.binding import LOLLMSConfig, BindingBuilder, LLMBinding, ModelBuilde
 from lollms.paths import LollmsPaths
 from lollms.helpers import ASCIIColors, trace_exception
 from lollms.app import LollmsApplication
-from lollms.utilities import File64BitsManager
+from lollms.utilities import File64BitsManager, PromptReshaper
 import multiprocessing as mp
 import threading
 import time
@@ -452,14 +452,16 @@ class LoLLMsAPPI(LollmsApplication):
             try:
                 if not self.personality.processor is None:
-                    self.personality.processor.add_file(save_path, partial(self.process_chunk, client_id = request.sid))
                     file.save(save_path)
+                    self.personality.processor.add_file(save_path, partial(self.process_chunk, client_id = request.sid))
                     # File saved successfully
                     socketio.emit('progress', {'status':True, 'progress': 100})
                 else:
-                    # Personality doesn't support file sending
-                    socketio.emit('progress', {'status':False, 'error': "Personality doesn't support file sending"})
+                    file.save(save_path)
+                    self.personality.add_file(save_path, partial(self.process_chunk, client_id = request.sid))
+                    # File saved successfully
+                    socketio.emit('progress', {'status':True, 'progress': 100})
             except Exception as e:
                 # Error occurred while saving the file
                 socketio.emit('progress', {'status':False, 'error': str(e)})
@@ -1185,6 +1187,9 @@ class LoLLMsAPPI(LollmsApplication):
                     print("Finished executing the workflow")
                     return
 
+            if len(self.personality.files)>0:
+                PromptReshaper("Documentation:{{doc}}\n{{Content}}")
+
             self._generate(full_prompt, n_predict, client_id, callback)
             ASCIIColors.success("\nFinished executing the generation")
diff --git a/app.py b/app.py
index a505ad29..0a6543d9 100644
--- a/app.py
+++ b/app.py
@@ -1509,7 +1509,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
         data = request.get_json()
         id = data['id']
         print(f"- Selecting active personality {id} ...",end="")
-        if id
 {
-        const endIndex = findLastSentenceIndex(startIndex);
-        const chunk = this.message.content.substring(startIndex, endIndex);
-        this.msg.text = chunk;
-        startIndex = endIndex + 1;
-        this.msg.onend = (event) => {
-          if (startIndex < this.message.content.length-2) {
-            // Use setTimeout to add a brief delay before speaking the next chunk
-            setTimeout(() => {
+        if (this.message.content.includes('.')){
+          const endIndex = findLastSentenceIndex(startIndex);
+          const chunk = this.message.content.substring(startIndex, endIndex);
+          this.msg.text = chunk;
+          startIndex = endIndex + 1;
+          this.msg.onend = (event) => {
+            if (startIndex < this.message.content.length-2) {
+              // Use setTimeout to add a brief delay before speaking the next chunk
+              setTimeout(() => {
+                speakChunk();
+              }, 1); // Adjust the delay as needed
+            } else {
+              this.isSpeaking = false;
+              console.log("voice off :",this.message.content.length," ",endIndex)
+            }
+          };
+          this.speechSynthesis.speak(this.msg);
+
+        }
+        else{
+          setTimeout(() => {
            speakChunk();
          }, 1); // Adjust the delay as needed
-        } else {
-          this.isSpeaking = false;
-          console.log("voice off :",this.message.content.length," ",endIndex)
-        }
-      };
-      this.speechSynthesis.speak(this.msg);
+        }
      };
      // Speak the first chunk
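
Note on the `PromptReshaper` change in `api/__init__.py`: the new code imports `PromptReshaper` and constructs it with a `Documentation:{{doc}}\n{{Content}}` template when the personality has attached files, but the constructed object is not yet used. The sketch below is a minimal stand-in showing the kind of placeholder substitution such a template implies; the class name `SimplePromptReshaper`, its `build` method, and its signature are assumptions made for illustration, not the actual `lollms.utilities.PromptReshaper` API.

```python
import re


class SimplePromptReshaper:
    """Illustrative {{placeholder}} template filler (hypothetical, not the lollms API)."""

    def __init__(self, template: str):
        self.template = template

    def build(self, placeholders: dict) -> str:
        # Replace every {{name}} occurrence with its value; unknown
        # placeholders are left untouched so the template stays inspectable.
        def substitute(match):
            key = match.group(1)
            return str(placeholders.get(key, match.group(0)))

        return re.sub(r"\{\{(\w+)\}\}", substitute, self.template)


if __name__ == "__main__":
    # Hypothetical usage mirroring the template string added in the diff.
    reshaper = SimplePromptReshaper("Documentation:{{doc}}\n{{Content}}")
    prompt = reshaper.build({
        "doc": "excerpt pulled from the uploaded file",
        "Content": "the user's question",
    })
    print(prompt)
```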