diff --git a/lollms/binding.py b/lollms/binding.py
index 76f1975..5cc2d99 100644
--- a/lollms/binding.py
+++ b/lollms/binding.py
@@ -565,7 +565,7 @@ class LLMBinding:
 
         return file_size
 
-    def build_model(self):
+    def build_model(self, model_name=None):
         """
         Build the model.
 
@@ -574,7 +574,10 @@ class LLMBinding:
         Returns:
             the model
         """
-        return None
+        if model_name is not None:
+            self.model_name = model_name
+        else:
+            self.model_name = self.config.model_name
 
     def destroy_model(self):
         """
@@ -892,8 +895,8 @@ class ModelBuilder:
         self.model = None
         self.build_model()
 
-    def build_model(self):
-        self.model = self.binding.build_model()
+    def build_model(self, model_name=None):
+        self.model = self.binding.build_model(model_name)
 
     def get_model(self):
         return self.model
diff --git a/lollms/personality.py b/lollms/personality.py
index a456f04..ed7068b 100644
--- a/lollms/personality.py
+++ b/lollms/personality.py
@@ -2522,7 +2522,7 @@ class APScript(StateMachine):
         Returns:
             str: The generated title.
         """
-        global_prompt = f"!@>instruction: Create a title for the following prompt:\n!@>prompt:{prompt}\n!@>title:"
+        global_prompt = f"!@>instructions: Based on the provided prompt, suggest a concise and relevant title that captures the main topic or theme of the conversation. Only return the suggested title, without any additional text or explanation.\n!@>prompt: {prompt}\n!@>title:"
         title = self.fast_gen(global_prompt,max_title_length)
         return title
 
diff --git a/lollms/server/endpoints/lollms_generator.py b/lollms/server/endpoints/lollms_generator.py
index b95956e..761b4a0 100644
--- a/lollms/server/endpoints/lollms_generator.py
+++ b/lollms/server/endpoints/lollms_generator.py
@@ -142,6 +142,9 @@ async def lollms_generate(request: LollmsGenerateRequest):
                     return False
 
             def chunks_builder():
+                if request.model_name in elf_server.binding.list_models() and elf_server.binding.model_name!=request.model_name:
+                    elf_server.binding.build_model(request.model_name)
+
                 elf_server.binding.generate(
                     prompt,
                     n_predict,
diff --git a/lollms/server/endpoints/lollms_xtts.py b/lollms/server/endpoints/lollms_xtts.py
index 07501a0..ae9eb5c 100644
--- a/lollms/server/endpoints/lollms_xtts.py
+++ b/lollms/server/endpoints/lollms_xtts.py
@@ -101,8 +101,8 @@ async def text2Audio(request: LollmsText2AudioRequest):
                             voice_samples_path=Path(__file__).parent/"voices",
                             xtts_base_url= lollmsElfServer.config.xtts_base_url
                         )
 
-        except:
-            return {"url": None}
+        except Exception as ex:
+            return {"url": None, "error":f"{ex}"}
         voice=lollmsElfServer.config.current_voice if request.voice is None else request.voice
         index = find_first_available_file_index(lollmsElfServer.tts.output_folder, "voice_sample_",".wav")
diff --git a/lollms/services/xtts/lollms_xtts.py b/lollms/services/xtts/lollms_xtts.py
index 7b0d6f9..80bd91c 100644
--- a/lollms/services/xtts/lollms_xtts.py
+++ b/lollms/services/xtts/lollms_xtts.py
@@ -164,7 +164,7 @@ class LollmsXTTS:
             process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
 
         else:
-            command = f'conda activate xtts && python -m xtts_api_server -o {self.output_folder} -sf {self.voice_samples_path} -p {self.xtts_base_url.split(':')[-1].replace('/','')}'
+            command = f"conda activate xtts && python -m xtts_api_server -o {self.output_folder} -sf {self.voice_samples_path} -p {self.xtts_base_url.split(':')[-1].replace('/','')}"
             process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
 
         return process
diff --git a/setup.py b/setup.py
index b46aec6..0e4f681 100644
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@ def get_all_files(path):
 
 setuptools.setup(
     name="lollms",
-    version="9.3.0",
+    version="9.4.0",
     author="Saifeddine ALOUI (ParisNeo)",
     author_email="aloui.saifeddine@gmail.com",
     description="A python library for AI personality definition",