upgraded configs and xtts

Saifeddine ALOUI 2024-05-01 20:02:41 +02:00
parent cb17c8b917
commit a668646ba9
8 changed files with 147 additions and 27 deletions

@@ -1,5 +1,5 @@
# =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
version: 85
version: 87
binding_name: null
model_name: null
model_variant: null
@@ -13,6 +13,7 @@ turn_on_code_execution: true
turn_on_code_validation: true
turn_on_open_file_validation: true
turn_on_send_file_validation: true
turn_on_language_validation: true
force_accept_remote_access: false
@@ -24,6 +25,8 @@ allowed_origins: []
host: localhost
port: 9600
app_custom_logo: ""
# Generation parameters
discussion_prompt_separator: "!@>"
seed: -1
@@ -80,6 +83,8 @@ copy_to_clipboard_add_all_details: false
# Voice service
enable_voice_service: false
xtts_base_url: http://localhost:8020
xtts_use_deepspeed: false
xtts_use_streaming_mode: true
auto_read: false
current_voice: null
current_language: en

@@ -268,7 +268,14 @@ class LollmsApplication(LoLLMsCom):
        if self.config.enable_voice_service:
            try:
                from lollms.services.xtts.lollms_xtts import LollmsXTTS
                self.tts = LollmsXTTS(self, voice_samples_path=self.lollms_paths.custom_voices_path, xtts_base_url=self.config.xtts_base_url, wait_for_service=False)
                self.tts = LollmsXTTS(
                    self,
                    voice_samples_path=self.lollms_paths.custom_voices_path,
                    xtts_base_url=self.config.xtts_base_url,
                    wait_for_service=False,
                    use_deep_speed=self.config.xtts_use_deepspeed,
                    use_streaming_mode=self.config.xtts_use_streaming_mode
                )
            except:
                self.warning(f"Couldn't load XTTS")
@@ -574,12 +581,13 @@ class LollmsApplication(LoLLMsCom):
        language = language.lower().strip().split()[0]
        language_path = self.lollms_paths.personal_configuration_path/"personalities"/self.personality.name/f"languages_{language}.yaml"
        if not language_path.exists():
            self.info(f"This is the first time this personality speaks {language}\nLollms is reconditioning the persona in that language.\nThis will be done just once. Next time, the personality will speak {language} out of the box")
            self.ShowBlockingMessage(f"This is the first time this personality speaks {language}\nLollms is reconditioning the persona in that language.\nThis will be done just once. Next time, the personality will speak {language} out of the box")
            language_path.parent.mkdir(exist_ok=True, parents=True)
            conditionning = "!@>system: "+self.personality.fast_gen(f"!@>instruction: Translate the following text to {language}:\n{self.personality.personality_conditioning.replace('!@>system:','')}\n!@>translation:\n")
            welcome_message = self.personality.fast_gen(f"!@>instruction: Translate the following text to {language}:\n{self.personality.welcome_message}\n!@>translation:\n")
            with open(language_path,"w",encoding="utf-8", errors="ignore") as f:
                yaml.safe_dump({"conditionning":conditionning,"welcome_message":welcome_message}, f)
            self.HideBlockingMessage()
        else:
            with open(language_path,"r",encoding="utf-8", errors="ignore") as f:
                language_pack = yaml.safe_load(f)
@@ -588,6 +596,21 @@ class LollmsApplication(LoLLMsCom):
        self.config.save_config()
        return True

    def del_personality_language(self, language:str):
        if language is None or language == "":
            return False
        language = language.lower().strip().split()[0]
        language_path = self.lollms_paths.personal_configuration_path/"personalities"/self.personality.name/f"languages_{language}.yaml"
        if language_path.exists():
            try:
                language_path.unlink()
            except Exception as ex:
                return False
        if self.config.current_language==language:
            self.config.current_language="english"
            self.config.save_config()
        return True

    # -------------------------------------- Prompt preparing
    def prepare_query(self, client_id: str, message_id: int = -1, is_continue: bool = False, n_tokens: int = 0, generation_type = None, force_using_internet=False) -> Tuple[str, str, List[str]]:
        """

@@ -1,5 +1,5 @@
# =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
version: 85
version: 87
binding_name: null
model_name: null
model_variant: null
@@ -13,6 +13,7 @@ turn_on_code_execution: true
turn_on_code_validation: true
turn_on_open_file_validation: true
turn_on_send_file_validation: true
turn_on_language_validation: true
force_accept_remote_access: false
@@ -24,6 +25,8 @@ allowed_origins: []
host: localhost
port: 9600
app_custom_logo: ""
# Generation parameters
discussion_prompt_separator: "!@>"
seed: -1
@@ -80,6 +83,8 @@ copy_to_clipboard_add_all_details: false
# Voice service
enable_voice_service: false
xtts_base_url: http://localhost:8020
xtts_use_deepspeed: false
xtts_use_streaming_mode: true
auto_read: false
current_voice: null
current_language: en
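
For reference, a quick way to check that an upgraded personal configuration actually picked up the new keys is a short sketch like the following (the config path is hypothetical; point it at your own lollms configs folder):

import yaml
from pathlib import Path

# Hypothetical path; adjust to your lollms personal configs folder
cfg_path = Path.home()/"Documents"/"lollms"/"configs"/"local_config.yaml"
cfg = yaml.safe_load(cfg_path.read_text(encoding="utf-8"))

print(cfg.get("version"))                  # 87 after this upgrade
print(cfg.get("xtts_use_deepspeed"))       # new key, default False
print(cfg.get("xtts_use_streaming_mode"))  # new key, default True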

@@ -1,29 +1,32 @@
# =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
version: 82
version: 87
binding_name: null
model_name: null
model_variant: null
model_type: null
show_news_panel: True
show_news_panel: true
# Security measures
turn_on_setting_update_validation: True
turn_on_code_execution: True
turn_on_code_validation: True
turn_on_open_file_validation: False
turn_on_send_file_validation: False
turn_on_setting_update_validation: true
turn_on_code_execution: true
turn_on_code_validation: true
turn_on_open_file_validation: true
turn_on_send_file_validation: true
turn_on_language_validation: true
force_accept_remote_access: false
# Server information
headless_server_mode: False
headless_server_mode: false
allowed_origins: []
# Host information
host: localhost
port: 9600
app_custom_logo: ""
# Generation parameters
discussion_prompt_separator: "!@>"
seed: -1
@@ -57,7 +60,7 @@ use_user_informations_in_discussion: false
discussion_db_name: default
# Automatic updates
debug: False
debug: false
debug_log_file_path: ""
auto_update: true
auto_sync_personalities: true
@@ -80,6 +83,8 @@ copy_to_clipboard_add_all_details: false
# Voice service
enable_voice_service: false
xtts_base_url: http://localhost:8020
xtts_use_deepspeed: false
xtts_use_streaming_mode: true
auto_read: false
current_voice: null
current_language: en
@@ -109,10 +114,11 @@ petals_device: cuda
# lollms service
enable_lollms_service: false
lollms_base_url: http://localhost:1234
activate_lollms_server: True
activate_ollama_emulator: True
activate_openai_emulator: True
activate_mistralai_emulator: True
lollms_access_keys: "" # set a list of keys separated by commas to restrict access
activate_lollms_server: true
activate_ollama_emulator: true
activate_openai_emulator: true
activate_mistralai_emulator: true
# elastic search service
elastic_search_service: false
@@ -149,7 +155,7 @@ data_vectorization_method: "tfidf_vectorizer" #"model_embedding" or "tfidf_vectorizer"
data_visualization_method: "PCA" #"PCA" or "TSNE"
data_vectorization_sentense_transformer_model: "all-MiniLM-L6-v2" # you can use another model by setting its name here or its path
data_vectorization_save_db: False # For each new session, new files
data_vectorization_save_db: false # For each new session, new files
data_vectorization_chunk_size: 512 # chunk size
data_vectorization_overlap_size: 128 # overlap between chunks size
data_vectorization_nb_chunks: 2 # number of chunks to use
@@ -164,8 +170,8 @@ internet_vectorization_chunk_size: 512 # chunk size
internet_vectorization_overlap_size: 128 # overlap between chunks size
internet_vectorization_nb_chunks: 2 # number of chunks to use
internet_nb_search_pages: 3 # number of pages to select
internet_quick_search: False # If active, the search engine will not load and read the webpages
internet_activate_search_decision: False # If active, the AI decides by itself if it needs to do a search
internet_quick_search: false # If active, the search engine will not load and read the webpages
internet_activate_search_decision: false # If active, the AI decides by itself if it needs to do a search
# Helpers
pdf_latex_path: null
@@ -173,7 +179,7 @@ pdf_latex_path: null
positive_boost: null
negative_boost: null
current_language: null
fun_mode: False
fun_mode: false
# webui configurations

@@ -15,7 +15,7 @@ import pkg_resources
from lollms.server.elf_server import LOLLMSElfServer
from lollms.personality import AIPersonality, InstallOption
from ascii_colors import ASCIIColors
from lollms.utilities import load_config, trace_exception, gc
from lollms.utilities import load_config, trace_exception, gc, show_yes_no_dialog
from lollms.security import check_access
from pathlib import Path
from typing import List, Optional
@@ -294,6 +294,24 @@ def set_personality_language(request: SetLanguageRequest):
    else:
        raise HTTPException(status_code=400, detail="Failed to set the personality language")

# Definition of the endpoint for deleting the personality language
@router.post("/del_personality_language")
def del_personality_language(request: SetLanguageRequest):
    # Access verification
    check_access(lollmsElfServer, request.client_id)
    # Calling the method to delete the personality language
    if lollmsElfServer.config.turn_on_language_validation:
        if not show_yes_no_dialog("Language deletion request received","I have received a language deletion request. Are you sure?"):
            return
    success = lollmsElfServer.del_personality_language(request.language)
    # Returning an appropriate response depending on whether the operation was successful or not
    if success:
        return {"message": f"The personality language {request.language} has been successfully deleted."}
    else:
        raise HTTPException(status_code=400, detail="Failed to delete the personality language")
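
A minimal sketch of how a client could exercise the new endpoint, assuming the default host and port from the configs above; the client_id value is a placeholder for whatever key check_access expects on your setup:

import requests

# Placeholder client_id; use the session/access key expected by check_access
response = requests.post(
    "http://localhost:9600/del_personality_language",
    json={"client_id": "my_client_id", "language": "french"},
)
print(response.json())  # e.g. {"message": "The personality language french has been successfully deleted."}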
# ------------------------------------------- Mounting/Unmounting/Remounting ------------------------------------------------

@@ -99,7 +99,9 @@ async def text2Audio(request: LollmsText2AudioRequest):
            lollmsElfServer.tts = LollmsXTTS(
                lollmsElfServer,
                voice_samples_path=Path(__file__).parent/"voices",
                xtts_base_url= lollmsElfServer.config.xtts_base_url
                xtts_base_url= lollmsElfServer.config.xtts_base_url,
                use_deep_speed= lollmsElfServer.config.xtts_use_deepspeed,
                use_streaming_mode= lollmsElfServer.config.xtts_use_streaming_mode,
            )
    except Exception as ex:
        return {"url": None, "error":f"{ex}"}
@@ -113,7 +115,13 @@ async def text2Audio(request: LollmsText2AudioRequest):
    try:
        from lollms.services.xtts.lollms_xtts import LollmsXTTS
        if lollmsElfServer.tts is None:
            lollmsElfServer.tts = LollmsXTTS(lollmsElfServer, voice_samples_path=Path(__file__).parent/"voices", xtts_base_url= lollmsElfServer.config.xtts_base_url)
            lollmsElfServer.tts = LollmsXTTS(
                lollmsElfServer,
                voice_samples_path=Path(__file__).parent/"voices",
                xtts_base_url= lollmsElfServer.config.xtts_base_url,
                use_deep_speed=lollmsElfServer.config.xtts_use_deepspeed,
                use_streaming_mode=lollmsElfServer.config.xtts_use_streaming_mode
            )
        language = lollmsElfServer.config.current_language # convert_language_name()
        if voice!="main_voice":
            voices_folder = lollmsElfServer.lollms_paths.custom_voices_path
@@ -164,7 +172,9 @@ def start_xtts():
        lollmsElfServer.tts = LollmsXTTS(
            lollmsElfServer,
            voice_samples_path=Path(__file__).parent/"voices",
            xtts_base_url= lollmsElfServer.config.xtts_base_url
            xtts_base_url= lollmsElfServer.config.xtts_base_url,
            use_deep_speed=lollmsElfServer.config.xtts_use_deepspeed,
            use_streaming_mode=lollmsElfServer.config.xtts_use_streaming_mode
        )
        lollmsElfServer.HideBlockingMessage()
    except Exception as ex:

@@ -0,0 +1,43 @@
import os
from pathlib import Path
import subprocess
import sys

from lollms.utilities import create_conda_env, run_script_in_env


def installAmphion(directory=None):
    # Save the current working directory
    original_cwd = Path.cwd()
    try:
        # Set the target directory for installation
        if directory is None:
            directory = original_cwd
        else:
            directory = Path(directory)

        # Create the directory if it does not exist
        directory.mkdir(parents=True, exist_ok=True)
        # Change the current working directory to the specified directory
        os.chdir(directory)
        # Clone the Amphion repository
        subprocess.run("git clone https://github.com/open-mmlab/Amphion.git", shell=True)
        # Change directory into the cloned Amphion directory
        os.chdir("Amphion")
        # Create and activate the Conda environment
        create_conda_env("amphion", "3.9.15")
        # Assuming env.sh installs Python package dependencies via pip
        # Modify the path to env.sh if it is located in a different directory
        env_sh_path = Path.cwd() / "env.sh"
        run_script_in_env("amphion", str(env_sh_path))
    finally:
        # Restore the original working directory
        os.chdir(original_cwd)

# Example usage: Install Amphion in a specific folder
if __name__ == "__main__":
    target_directory = "/path/to/specific/folder"
    installAmphion(target_directory)

@@ -88,7 +88,10 @@ class LollmsXTTS:
            share=False,
            max_retries=10,
            voice_samples_path="",
            wait_for_service=True
            wait_for_service=True,
            use_deep_speed=False,
            use_streaming_mode = True
        ):
        if xtts_base_url=="" or xtts_base_url=="http://127.0.0.1:8020":
            xtts_base_url = None
@@ -97,6 +100,8 @@ class LollmsXTTS:
        self.app = app
        root_dir = lollms_paths.personal_path
        self.voice_samples_path = voice_samples_path
        self.use_deep_speed = use_deep_speed
        self.use_streaming_mode = use_streaming_mode

        # Store the path to the script
        if xtts_base_url is None:
@@ -138,7 +143,12 @@ class LollmsXTTS:
        # Get the path to the current Python interpreter
        python_path = sys.executable
        ASCIIColors.yellow("Loading XTTS ")
        process = run_python_script_in_env("xtts", f"-m xtts_api_server -o {self.output_folder} -sf {self.voice_samples_path} -p {self.xtts_base_url.split(':')[-1].replace('/','')}", wait= False)
        options = ""
        if self.use_deep_speed:
            options += " --deepspeed"
        if self.use_streaming_mode:
            options += " --streaming-mode --streaming-mode-improve --stream-play-sync"
        process = run_python_script_in_env("xtts", f"-m xtts_api_server{options} -o {self.output_folder} -sf {self.voice_samples_path} -p {self.xtts_base_url.split(':')[-1].replace('/','')}", wait= False)
        return process

    def wait_for_service(self, max_retries = 150, show_warning=True):
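
A minimal sketch (not part of the commit) of what the option assembly in run produces when both flags are enabled, with placeholder output folder, samples folder, and port:

# Reproduces the option-string logic above with both flags on
use_deep_speed = True
use_streaming_mode = True

options = ""
if use_deep_speed:
    options += " --deepspeed"
if use_streaming_mode:
    options += " --streaming-mode --streaming-mode-improve --stream-play-sync"

# Placeholder folders/port; the real call derives them from the config
print(f"-m xtts_api_server{options} -o outputs -sf voices -p 8020")
# -m xtts_api_server --deepspeed --streaming-mode --streaming-mode-improve --stream-play-sync -o outputs -sf voices -p 8020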