lollms v 9.4

Saifeddine ALOUI 2024-03-09 21:58:55 +01:00
parent a626d99b8b
commit 3e0af3407e
6 changed files with 15 additions and 9 deletions

View File

@@ -565,7 +565,7 @@ class LLMBinding:
         return file_size
-    def build_model(self):
+    def build_model(self, model_name=None):
         """
         Build the model.
@@ -574,7 +574,10 @@ class LLMBinding:
         Returns:
             the model
         """
-        return None
+        if model_name is not None:
+            self.model_name = model_name
+        else:
+            self.model_name = self.config.model_name
     def destroy_model(self):
         """
@@ -892,8 +895,8 @@ class ModelBuilder:
         self.model = None
         self.build_model()
-    def build_model(self):
-        self.model = self.binding.build_model()
+    def build_model(self, model_name=None):
+        self.model = self.binding.build_model(model_name)
     def get_model(self):
         return self.model
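
Not part of the diff: a minimal usage sketch of the new optional model_name parameter, assuming binding is an already-initialized LLMBinding subclass and that ModelBuilder takes the binding in its constructor; the model name below is a placeholder.

# Hypothetical illustration of hot-swapping the model through ModelBuilder.
builder = ModelBuilder(binding)                     # loads config.model_name by default
builder.build_model("some-other-installed-model")   # updates model_name and rebuilds
model = builder.get_model()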

View File

@@ -2522,7 +2522,7 @@ class APScript(StateMachine):
         Returns:
             str: The generated title.
         """
-        global_prompt = f"!@>instruction: Create a title for the following prompt:\n!@>prompt:{prompt}\n!@>title:"
+        global_prompt = f"!@>instructions: Based on the provided prompt, suggest a concise and relevant title that captures the main topic or theme of the conversation. Only return the suggested title, without any additional text or explanation.\n!@>prompt: {prompt}\n!@>title:"
         title = self.fast_gen(global_prompt,max_title_length)
         return title
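
Not part of the diff: rendering the updated template for a sample input makes the !@> header format easier to see; the sample prompt text below is illustrative only.

# Illustrative rendering of the new title-generation template.
prompt = "How can I add a new binding to lollms?"
global_prompt = (
    "!@>instructions: Based on the provided prompt, suggest a concise and relevant title "
    "that captures the main topic or theme of the conversation. Only return the suggested "
    "title, without any additional text or explanation.\n"
    f"!@>prompt: {prompt}\n"
    "!@>title:"
)
print(global_prompt)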

View File

@@ -142,6 +142,9 @@ async def lollms_generate(request: LollmsGenerateRequest):
                    return False
            def chunks_builder():
+                if request.model_name in elf_server.binding.list_models() and elf_server.binding.model_name!=request.model_name:
+                    elf_server.binding.build_model(request.model_name)
+
                elf_server.binding.generate(
                    prompt,
                    n_predict,
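
Not part of the diff: a sketch of how a client could use the per-request model switch. The URL, port, and payload shape are assumptions; only prompt, model_name, and n_predict appear in the change above.

import requests

# Assumed endpoint path and payload for illustration only.
payload = {
    "prompt": "Summarize the lollms project in one sentence.",
    "model_name": "some-installed-model",  # if it differs from the loaded model, the server rebuilds it
    "n_predict": 128,
}
response = requests.post("http://localhost:9600/lollms_generate", json=payload)
print(response.text)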

View File

@@ -101,8 +101,8 @@ async def text2Audio(request: LollmsText2AudioRequest):
                voice_samples_path=Path(__file__).parent/"voices",
                xtts_base_url= lollmsElfServer.config.xtts_base_url
            )
-        except:
-            return {"url": None}
+        except Exception as ex:
+            return {"url": None, "error":f"{ex}"}
    voice=lollmsElfServer.config.current_voice if request.voice is None else request.voice
    index = find_first_available_file_index(lollmsElfServer.tts.output_folder, "voice_sample_",".wav")
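
Not part of the diff: with the broader handler a caller can now report why text-to-speech failed to start. The endpoint path and request payload are assumptions; the {"url": None, "error": ...} response shape comes from the change above.

import requests

# Assumed endpoint path and payload; checks the error field added in this commit.
resp = requests.post("http://localhost:9600/text2Audio", json={"text": "Hello world"}).json()
if resp.get("url") is None:
    print("Text-to-audio failed:", resp.get("error", "unknown error"))
else:
    print("Audio available at:", resp["url"])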

View File

@@ -164,7 +164,7 @@ class LollmsXTTS:
            process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
        else:
-            command = f'conda activate xtts && python -m xtts_api_server -o {self.output_folder} -sf {self.voice_samples_path} -p {self.xtts_base_url.split(':')[-1].replace('/','')}'
+            command = f"conda activate xtts && python -m xtts_api_server -o {self.output_folder} -sf {self.voice_samples_path} -p {self.xtts_base_url.split(':')[-1].replace('/','')}"
            process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
        return process
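
The quote change above is not cosmetic: before Python 3.12 an f-string cannot reuse its own quote character inside a replacement field, so the single-quoted version fails to parse. A standalone sketch of the rule, with an illustrative URL:

# Before Python 3.12, reusing the f-string's quote character inside {...} is a SyntaxError.
xtts_base_url = "http://localhost:8020"
# port = f'{xtts_base_url.split(':')[-1]}'   # SyntaxError on Python < 3.12
port = f"{xtts_base_url.split(':')[-1]}"     # works: inner quotes differ from the outer ones
print(port)  # -> 8020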

View File

@@ -26,7 +26,7 @@ def get_all_files(path):
 setuptools.setup(
     name="lollms",
-    version="9.3.0",
+    version="9.4.0",
     author="Saifeddine ALOUI (ParisNeo)",
     author_email="aloui.saifeddine@gmail.com",
     description="A python library for AI personality definition",