This commit is contained in:
Saifeddine ALOUI 2025-04-06 21:02:08 +02:00
parent 3ecc28c4c0
commit 8b8baa9c4c
28 changed files with 15 additions and 2015 deletions

View File

@@ -445,12 +445,12 @@ class LollmsApplication(LoLLMsCom):
def _generate_text(self, prompt):
-max_tokens = min(self.config.ctx_size - self.model.get_nb_tokens(prompt),self.config.max_n_predict if self.config.max_n_predict else self.config.ctx_size- self.model.get_nb_tokens(prompt))
+max_tokens = min(self.config.ctx_size - self.model.count_tokens(prompt),self.config.max_n_predict if self.config.max_n_predict else self.config.ctx_size- self.model.count_tokens(prompt))
generated_text = self.model.generate(prompt, max_tokens)
return generated_text.strip()
def _generate_code(self, prompt, template, language):
-max_tokens = min(self.config.ctx_size - self.model.get_nb_tokens(prompt),self.config.max_n_predict if self.config.max_n_predict else self.config.ctx_size- self.model.get_nb_tokens(prompt))
+max_tokens = min(self.config.ctx_size - self.model.count_tokens(prompt),self.config.max_n_predict if self.config.max_n_predict else self.config.ctx_size- self.model.count_tokens(prompt))
generated_code = self.personality.generate_code(prompt, self.personality.image_files, template, language, max_size= max_tokens)
return generated_code
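The replaced one-liners pack the whole generation budget into a single expression. Split into named steps, the same logic reads as the sketch below (compute_max_tokens is a hypothetical helper for illustration, not part of the codebase):

def compute_max_tokens(config, model, prompt: str) -> int:
    # Hypothetical helper: tokens left in the context window after the prompt.
    remaining_ctx = config.ctx_size - model.count_tokens(prompt)
    # Honor max_n_predict when it is set; otherwise allow the full remainder.
    budget = config.max_n_predict if config.max_n_predict else remaining_ctx
    return min(remaining_ctx, budget)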
@@ -1123,14 +1123,14 @@ class LollmsApplication(LoLLMsCom):
# boosting information
if self.config.positive_boost:
positive_boost=f"{self.system_custom_header('important information')}"+self.config.positive_boost+"\n"
-n_positive_boost = len(self.model.tokenize(positive_boost))
+n_positive_boost = self.model.count_tokens(positive_boost)
else:
positive_boost=""
n_positive_boost = 0
if self.config.negative_boost:
negative_boost=f"{self.system_custom_header('important information')}"+self.config.negative_boost+"\n"
-n_negative_boost = len(self.model.tokenize(negative_boost))
+n_negative_boost = self.model.count_tokens(negative_boost)
else:
negative_boost=""
n_negative_boost = 0
@@ -1139,7 +1139,7 @@ class LollmsApplication(LoLLMsCom):
fun_mode=f"""{self.system_custom_header('important information')}
Fun mode activated. In this mode you must answer in a funny playful way. Do not be serious in your answers. Each answer needs to make the user laugh.\n"
"""
-n_fun_mode = len(self.model.tokenize(positive_boost))
+n_fun_mode = self.model.count_tokens(positive_boost)
else:
fun_mode=""
n_fun_mode = 0
@@ -1148,7 +1148,7 @@ Fun mode activated. In this mode you must answer in a funny playful way. Do not
think_first_mode=f"""{self.system_custom_header('important information')}
{self.config.thinking_prompt}
"""
-n_think_first_mode = len(self.model.tokenize(positive_boost))
+n_think_first_mode = self.model.count_tokens(positive_boost)
else:
think_first_mode=""
n_think_first_mode = 0

View File

@@ -98,7 +98,7 @@ class LLMBinding:
models_folder.mkdir(parents=True, exist_ok=True)
-def get_nb_tokens(self, prompt):
+def count_tokens(self, prompt):
"""
Counts the number of tokens in a prompt
"""

View File

@@ -701,7 +701,7 @@ class AIPersonality:
ASCIIColors.red(" *-*-*-*-*-*-*-*")
ASCIIColors.yellow(prompt)
ASCIIColors.red(" *-*-*-*-*-*-*-*")
-ASCIIColors.red(f"Weight : {len(self.model.tokenize(prompt))} tokens")
+ASCIIColors.red(f"Weight : {self.model.count_tokens(prompt)} tokens")
ASCIIColors.red(" *-*-*-*-*-*-*-*")
@@ -749,7 +749,7 @@ class AIPersonality:
self.model.config.ctx_size - max_generation_size if max_generation_size else self.model.config.ctx_size - self.model.config.min_n_predict,
sacrifice
)
-ntk = len(self.model.tokenize(prompt))
+ntk = self.model.count_tokens(prompt)
if max_generation_size:
max_generation_size = min(self.model.config.ctx_size - ntk, max_generation_size)
else:
@@ -1576,7 +1576,7 @@ Don't forget encapsulate the code inside a html code tag. This is mandatory.
self.print_prompt("gen",prompt)
if max_size is None:
-max_size = min(self.config.max_n_predict if self.config.max_n_predict else self.config.ctx_size-len(self.model.tokenize(prompt)), self.config.ctx_size-len(self.model.tokenize(prompt)))
+max_size = min(self.config.max_n_predict if self.config.max_n_predict else self.config.ctx_size-self.model.count_tokens(prompt), self.config.ctx_size-self.model.count_tokens(prompt))
self.model.generate_with_images(
prompt,
@@ -1597,7 +1597,7 @@ Don't forget encapsulate the code inside a html code tag. This is mandatory.
self.bot_says = ""
if debug:
self.print_prompt("gen",prompt)
-ntokens = len(self.model.tokenize(prompt))
+ntokens = self.model.count_tokens(prompt)
self.model.generate(
prompt,
@@ -2800,12 +2800,12 @@ Do not discuss the information inside the memory, just put the relevant informat
# Calculate static prompt tokens (with empty memory and chunk)
chunk_id = 0
-static_tokens = len(self.model.tokenize(example_prompt))
+static_tokens = self.model.count_tokens(example_prompt)
# Process text in chunks
while start_token_idx < total_tokens:
# Calculate available tokens for chunk
-current_memory_tokens = len(self.model.tokenize(memory))
+current_memory_tokens = self.model.count_tokens(memory)
available_tokens = ctx_size - static_tokens - current_memory_tokens
if available_tokens <= 0:
@@ -2892,7 +2892,7 @@ The updated memory must be put in a {chunk_processing_output_format} markdown ta
# Truncate memory if needed for final prompt
example_final_prompt = final_prompt_template
-final_static_tokens = len(self.model.tokenize(example_final_prompt))
+final_static_tokens = self.model.count_tokens(example_final_prompt)
available_final_tokens = ctx_size - final_static_tokens
memory_tokens = self.model.tokenize(memory)
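The hunk ends just before the truncation itself. Assuming the binding exposes a detokenize counterpart to tokenize, the trimming step could look like this sketch:

# Sketch (assumption): keep only the most recent tokens that still fit.
if len(memory_tokens) > available_final_tokens:
    memory = self.model.detokenize(memory_tokens[-available_final_tokens:])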

View File

@@ -202,7 +202,7 @@ class LollmsContextDetails:
# Debugging information
if self.debug and self.model:
-nb_prompt_tokens = len(self.model.tokenize(prompt))
+nb_prompt_tokens = self.model.count_tokens(prompt)
nb_tokens = min(
self.ctx_size - nb_prompt_tokens,
self.max_n_predict if self.max_n_predict else self.ctx_size - nb_prompt_tokens

View File

@@ -1,92 +0,0 @@
# Title LollmsOpenAIWhisper
# Licence: MIT
# Author : Paris Neo
#
from pathlib import Path
import sys
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths
from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
import time
import io
import sys
import requests
import os
import base64
import subprocess
import time
import json
import platform
from dataclasses import dataclass
from PIL import Image, PngImagePlugin
from enum import Enum
from typing import List, Dict, Any
from ascii_colors import ASCIIColors, trace_exception
from lollms.paths import LollmsPaths
from lollms.utilities import PackageManager, find_next_available_filename
from lollms.stt import LollmsSTT
import subprocess
import shutil
from tqdm import tqdm
import threading
from io import BytesIO
from openai import OpenAI
class LollmsOpenAIWhisper(LollmsSTT):
def __init__(
self,
app:LollmsApplication,
output_folder:str|Path=None
):
"""
Initializes the LollmsOpenAIWhisper binding.
Args:
api_key (str): The API key for authentication.
output_folder (Path|str): The output folder where to put the generated data
"""
api_key = os.getenv("OPENAI_KEY","")
service_config = TypedConfig(
ConfigTemplate([
{"name":"api_key", "type":"str", "value":api_key, "help":"A valid Open AI key to generate text using anthropic api"},
{
"name": "model",
"type": "str",
"value": "whisper-1",
"options": ["whisper-1"],
"help": "The model to be used"
},
]),
BaseConfig(config={
"api_key": "", # use avx2
})
)
super().__init__("openai_whisper",app, service_config, output_folder)
self.client = OpenAI(api_key=self.service_config.api_key)
self.ready = True
def settings_updated(self):
self.client = OpenAI(api_key=self.service_config.api_key)
self.ready = True
def transcribe(
self,
wav_path: str|Path,
model:str="",
output_path:str|Path=None
):
if model=="" or model is None:
model = self.model
if output_path is None:
output_path = self.output_path
audio_file= open(str(wav_path), "rb")
transcription = self.client.audio.transcriptions.create(
model=model,
file=audio_file,
response_format="text"
)
return transcription
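For reference, a usage sketch of the removed service; app is assumed to be a configured LollmsApplication, and the file name is hypothetical:

# Transcribe a wav file through the OpenAI Whisper API (sketch).
stt = LollmsOpenAIWhisper(app, output_folder="outputs")
text = stt.transcribe("recording.wav", model="whisper-1")
print(text)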

View File

@@ -1,88 +0,0 @@
# Title LollmsWhisper
# Licence: MIT
# Author : Paris Neo
#
from pathlib import Path
from lollms.app import LollmsApplication
from lollms.stt import LollmsSTT
from dataclasses import dataclass
from PIL import Image, PngImagePlugin
from enum import Enum
from typing import List, Dict, Any
from ascii_colors import ASCIIColors, trace_exception
from lollms.paths import LollmsPaths
import subprocess
import pipmaster as pm
from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
try:
if not pm.is_installed("openai-whisper"):
pm.install("openai-whisper")
try:
pass#install_conda_package("conda-forge::ffmpeg")
except Exception as ex:
trace_exception(ex)
ASCIIColors.red("Couldn't install ffmpeg")
except:
try:
pass#install_conda_package("conda-forge::ffmpeg")
except Exception as ex:
trace_exception(ex)
ASCIIColors.red("Couldn't install ffmpeg")
pm.install("git+https://github.com/openai/whisper.git")
try:
import whisper
except:
pm.install("openai-whisper")
class LollmsWhisper(LollmsSTT):
def __init__(
self,
app:LollmsApplication,
output_folder:str|Path=None
):
"""
Initializes the LollmsWhisper binding.
Args:
api_key (str): The API key for authentication.
output_folder (Path|str): The output folder where to put the generated data
"""
service_config = TypedConfig(
ConfigTemplate([
{
"name": "model_name",
"type": "str",
"value": "base",
"options": ["tiny", "tiny.en", "base", "base.en", "small", "small.en", "medium", "medium.en", "large", "large-v2", "large-v3", "turbo"],
"help": "The engine to be used"
},
]),
BaseConfig(config={
"api_key": "", # use avx2
})
)
super().__init__("whisper",app, service_config, output_folder)
try:
self.whisper = whisper.load_model(service_config.model_name)
except:
ASCIIColors.red("Couldn't load whisper model!\nWhisper will be disabled")
self.whisper = None
self.ready = True
def settings_updated(self):
pass
def transcribe(
self,
wave_path: str|Path
)->str:
if self.whisper:
result = self.whisper.transcribe(str(wave_path))
return result["text"]
else:
ASCIIColors.error("Whisper is broken")
return ""

View File

@@ -1,140 +0,0 @@
"""
Lollms TTM Module
=================
This module is part of the Lollms library, designed to provide Text-to-Music (TTM) functionalities within the LollmsApplication framework. The base class `LollmsTTM` is intended to be inherited and implemented by other classes that provide specific TTM functionalities.
Author: ParisNeo, a computer geek passionate about AI
"""
from lollms.app import LollmsApplication
from pathlib import Path
from typing import List, Dict
from lollms.ttm import LollmsTTM
from lollms.utilities import PackageManager, File_Path_Generator, check_and_install_torch
import pipmaster as pm
pm.install_if_missing("audiocraft")
from audiocraft.models import musicgen
class LollmsMusicGen(LollmsTTM):
"""
LollmsMusicGen is a model class for implementing Text-to-Music (TTM) functionalities within the LollmsApplication.
Attributes:
app (LollmsApplication): The instance of the main Lollms application.
model (str): The TTM model to be used for music generation.
api_key (str): API key for accessing external TTM services (if needed).
output_path (Path or str): Path where the output audio files will be saved.
voices (List[str]): List of available voices for TTM (to be filled by the child class).
models (List[str]): List of available models for TTM (to be filled by the child class).
"""
def __init__(
self,
name:str,
app: LollmsApplication,
model="facebook/musicgen-melody",#"facebook/musicgen-small","facebook/musicgen-medium","facebook/musicgen-melody","facebook/musicgen-large"
device="cuda",
api_key="",
output_path=None
):
"""
Initializes the LollmsMusicGen class with the given parameters.
Args:
app (LollmsApplication): The instance of the main Lollms application.
model (str, optional): The TTM model to be used for music generation. Defaults to an empty string.
api_key (str, optional): API key for accessing external TTM services. Defaults to an empty string.
output_path (Path or str, optional): Path where the output audio files will be saved. Defaults to None.
"""
self.name = name
self.app = app
self.model = model
self.api_key = api_key
self.output_path = output_path
self.music_model = musicgen.MusicGen.get_pretrained(model, device=device)
self.models = [] # To be filled by the child class
self.ready = True
def settings_updated(self):
pass
def generate(self,
positive_prompt: str,
negative_prompt: str = "",
duration=30,
generation_engine=None,
output_path = None) -> List[Dict[str, str]]:
"""
Generates music based on the given positive and negative prompts.
Args:
positive_prompt (str): The positive prompt describing the desired music.
negative_prompt (str, optional): The negative prompt describing what should be avoided in the music. Defaults to an empty string.
Returns:
The path of the generated audio file and a dictionary of generation metadata.
"""
if output_path is None:
output_path = self.output_path
import torchaudio
self.music_model.set_generation_params(duration=duration)
res = self.music_model.generate([positive_prompt], progress=True)
output_path.mkdir(parents=True, exist_ok=True)
output_file = File_Path_Generator.generate_unique_file_path(output_path, "generation","wav")
torchaudio.save(output_file, res.reshape(1, -1).cpu(), 32000)
return output_file, {"prompt":positive_prompt,"duration":duration}
def generate_from_samples(self, positive_prompt: str, samples: List[str], negative_prompt: str = "") -> List[Dict[str, str]]:
"""
Generates music based on the given positive prompt and reference audio samples.
Args:
positive_prompt (str): The positive prompt describing the desired music.
samples (List[str]): A list of paths to reference audio samples.
negative_prompt (str, optional): The negative prompt describing what should be avoided in the music. Defaults to an empty string.
Returns:
List[Dict[str, str]]: A list of dictionaries containing audio paths, URLs, and metadata.
"""
pass
@staticmethod
def verify(app: LollmsApplication) -> bool:
"""
Verifies if the TTM service is available.
Args:
app (LollmsApplication): The instance of the main Lollms application.
Returns:
bool: True if the service is available, False otherwise.
"""
return True
@staticmethod
def install(app: LollmsApplication) -> bool:
"""
Installs the necessary components for the TTM service.
Args:
app (LollmsApplication): The instance of the main Lollms application.
Returns:
bool: True if the installation was successful, False otherwise.
"""
return True
@staticmethod
def get(app: LollmsApplication) -> 'LollmsTTM':
"""
Returns the LollmsTTM class.
Args:
app (LollmsApplication): The instance of the main Lollms application.
Returns:
LollmsTTM: The LollmsTTM class.
"""
return LollmsTTM
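A short usage sketch of the removed class, assuming app is a running LollmsApplication, a CUDA device (the default), and a hypothetical outputs folder:

from pathlib import Path

# Generate a 10 second clip; generate returns the wav path and a metadata dict.
ttm = LollmsMusicGen("musicgen", app, output_path=Path("outputs"))
wav_file, metadata = ttm.generate("calm piano melody", duration=10)
print(wav_file, metadata)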

View File

@@ -1,43 +0,0 @@
import os
from pathlib import Path
import subprocess
import sys
from lollms.utilities import create_conda_env, run_script_in_env
def installAmphion(directory=None):
# Save the current working directory
original_cwd = Path.cwd()
try:
# Set the target directory for installation
if directory is None:
directory = original_cwd
else:
directory = Path(directory)
# Create the directory if it does not exist
directory.mkdir(parents=True, exist_ok=True)
# Change the current working directory to the specified directory
os.chdir(directory)
# Clone the Amphion repository
subprocess.run("git clone https://github.com/open-mmlab/Amphion.git", shell=True)
# Change directory into the cloned Amphion directory
os.chdir("Amphion")
# Create and activate the Conda environment
create_conda_env("amphion", "3.9.15")
# Assuming env.sh installs Python package dependencies via pip
# Modify the path to env.sh if it is located in a different directory
env_sh_path = Path.cwd() / "env.sh"
run_script_in_env("amphion", str(env_sh_path))
finally:
# Restore the original working directory
os.chdir(original_cwd)
# Example usage: Install Amphion in a specific folder
if __name__ == "__main__":
target_directory = "/path/to/specific/folder"
installAmphion(target_directory)

View File

@@ -1,207 +0,0 @@
# Title LollmsASR
# Licence: MIT
# Author : Paris Neo
# Adapted from the work of ahmetoner's whisper-asr-webservice
# check it out : https://github.com/ahmetoner/whisper-asr-webservice
# Here is a copy of the LICENCE https://github.com/ahmetoner/whisper-asr-webservice/blob/main/LICENCE
# All rights are reserved
from pathlib import Path
import sys
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths
from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
from lollms.utilities import PackageManager
import time
import io
import sys
import requests
import os
import base64
import subprocess
import time
import json
import platform
import threading
from dataclasses import dataclass
from PIL import Image, PngImagePlugin
from enum import Enum
from typing import List, Dict, Any
import uuid
from ascii_colors import ASCIIColors, trace_exception
from lollms.paths import LollmsPaths
from lollms.utilities import git_pull, show_yes_no_dialog, run_python_script_in_env, create_conda_env, run_pip_in_env, environment_exists
import subprocess
import platform
def verify_asr(lollms_paths:LollmsPaths):
# Clone repository
root_dir = lollms_paths.personal_path
shared_folder = root_dir/"shared"
asr_path = shared_folder / "asr"
return asr_path.exists()
def install_asr(lollms_app:LollmsApplication):
ASCIIColors.green("asr installation started")
repo_url = "https://github.com/ParisNeo/whisper-asr-webservice.git"
root_dir = lollms_app.lollms_paths.personal_path
shared_folder = root_dir/"shared"
asr_path = shared_folder / "asr"
# Step 1: Clone or update the repository
if os.path.exists(asr_path):
print("Repository already exists. Pulling latest changes...")
try:
subprocess.run(["git", "-C", asr_path, "pull"], check=True)
except:
subprocess.run(["git", "clone", repo_url, asr_path], check=True)
else:
print("Cloning repository...")
subprocess.run(["git", "clone", repo_url, asr_path], check=True)
# Step 2: Create or update the Conda environment
if environment_exists("asr"):
print("Conda environment 'asr' already exists. Updating...")
# Here you might want to update the environment, e.g., update Python or dependencies
# This step is highly dependent on how you manage your Conda environments and might involve
# running `conda update` commands or similar.
else:
print("Creating Conda environment 'asr'...")
create_conda_env("asr", "3.10")
# Step 3: Install or update dependencies using your custom function
requirements_path = os.path.join(asr_path, "requirements.txt")
run_pip_in_env("asr", f"install .", cwd=asr_path)
# Step 4: Launch the server
# Assuming the server can be started with a Python script in the cloned repository
print("Launching asr API server...")
run_python_script_in_env("asr", "asr_api_server", cwd=asr_path)
print("asr API server setup and launch completed.")
ASCIIColors.cyan("Done")
ASCIIColors.cyan("Installing asr-api-server")
ASCIIColors.green("asr server installed successfully")
def get_asr(lollms_paths:LollmsPaths):
root_dir = lollms_paths.personal_path
shared_folder = root_dir/"shared"
asr_path = shared_folder / "asr"
asr_script_path = asr_path / "lollms_asr.py"
git_pull(asr_path)
if asr_script_path.exists():
ASCIIColors.success("lollms_asr found.")
ASCIIColors.success("Loading source file...",end="")
# use importlib to load the module from the file path
from lollms.services.asr.lollms_asr import LollmsASR
ASCIIColors.success("ok")
return LollmsASR
class LollmsASR:
has_controlnet = False
def __init__(
self,
app:LollmsApplication,
asr_base_url=None,
share=False,
max_retries=20,
wait_for_service=True
):
self.generation_threads = []
self.ready = False
if asr_base_url=="" or asr_base_url=="http://127.0.0.1:9000":
asr_base_url = None
# Get the current directory
lollms_paths = app.lollms_paths
self.app = app
root_dir = lollms_paths.personal_path
# Store the path to the script
if asr_base_url is None:
self.asr_base_url = "http://127.0.0.1:9000"
if not verify_asr(lollms_paths):
install_asr(app.lollms_paths)
else:
self.asr_base_url = asr_base_url
self.auto_asr_url = self.asr_base_url+"/asr"
shared_folder = root_dir/"shared"
self.asr_path = shared_folder / "asr"
ASCIIColors.red(" _ _ _ ___ ___ ___ ___________ ")
ASCIIColors.red("| | | | | | | \/ | / _ \ / ___| ___ \ ")
ASCIIColors.red("| | ___ | | | | | . . |___ / /_\ \\ `--.| |_/ /")
ASCIIColors.red("| | / _ \| | | | | |\/| / __| | _ | `--. \ / ")
ASCIIColors.red("| |___| (_) | |____| |____| | | \__ \ | | | |/\__/ / |\ \ ")
ASCIIColors.red("\_____/\___/\_____/\_____/\_| |_/___/ \_| |_/\____/\_| \_|")
ASCIIColors.red(" ______ ")
ASCIIColors.red(" |______| ")
ASCIIColors.red(" Forked from ahmetoner's asr server")
ASCIIColors.red(" Integration in lollms by ParisNeo using ahmetoner's webapi")
ASCIIColors.red(" Address :",end="")
ASCIIColors.yellow(f"{self.asr_base_url}")
self.output_folder = app.lollms_paths.personal_outputs_path/"audio_out"
self.output_folder.mkdir(parents=True, exist_ok=True)
if not self.wait_for_service(1,False):
ASCIIColors.info("Loading lollms_asr")
# Launch the Flask service using the appropriate script for the platform
self.process = self.run_asr_api_server()
# Wait until the service is available at http://127.0.0.1:9000/
if wait_for_service:
self.wait_for_service()
else:
self.wait_for_service_in_another_thread(max_retries=max_retries)
def run_asr_api_server(self):
# Get the path to the current Python interpreter
ASCIIColors.yellow("Loading asr ")
process = run_python_script_in_env("asr", f"app/webservice.py", wait= False, cwd=self.asr_path)
return process
def wait_for_service_in_another_thread(self, max_retries=150, show_warning=True):
thread = threading.Thread(target=self.wait_for_service, args=(max_retries, show_warning))
thread.start()
return thread
def wait_for_service(self, max_retries = 150, show_warning=True):
print(f"Waiting for asr service (max_retries={max_retries})")
url = f"{self.asr_base_url}/languages"
# Adjust this value as needed
retries = 0
while retries < max_retries or max_retries<0:
try:
response = requests.get(url)
if response.status_code == 200:
print(f"voices_folder is {self.voices_folder}.")
if self.voices_folder is not None:
print("Generating sample audio.")
voice_file = [v for v in self.voices_folder.iterdir() if v.suffix==".wav"]
self.tts_audio("asr is ready",voice_file[0].name)
print("Service is available.")
if self.app is not None:
self.app.success("asr Service is now available.")
self.ready = True
return True
except Exception as ex:
trace_exception(ex)
retries += 1
ASCIIColors.yellow("Waiting for asr...")
time.sleep(5)
if show_warning:
print("Service did not become available within the given time.")
if self.app is not None:
self.app.error("asr Service did not become available within the given time.")
return False

View File

@@ -1,212 +0,0 @@
# Title LollmsElevenLabsTTS
# Licence: MIT
# Author : Paris Neo
# Uses the Eleven Labs API to perform text to speech
#
from pathlib import Path
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths
from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
import sys
import requests
from typing import List, Dict, Any
from ascii_colors import ASCIIColors, trace_exception
from lollms.paths import LollmsPaths
from lollms.utilities import PackageManager, find_next_available_filename
from lollms.tts import LollmsTTS
import pipmaster as pm
if not pm.is_installed("sounddevice"):
pm.install("sounddevice")
if not pm.is_installed("soundfile"):
pm.install("soundfile")
import sounddevice as sd
import soundfile as sf
import os
def get_ElevenLabsTTS(lollms_paths:LollmsPaths):
return LollmsElevenLabsTTS
class LollmsElevenLabsTTS(LollmsTTS):
def __init__(
self,
app: LollmsApplication,
output_folder: Path | str = None,
):
"""
Initializes the LollmsElevenLabsTTS binding.
Args:
api_key (str): The API key for authentication.
output_folder (Path|str): The output folder where to put the generated data
"""
# Check for the ELEVENLABS_KEY environment variable if no API key is provided
api_key = os.getenv("ELEVENLABS_KEY","")
service_config = TypedConfig(
ConfigTemplate(
[
{
"name": "model_id",
"type": "str",
"value": "eleven_turbo_v2_5",
"options": ["eleven_turbo_v2_5","eleven_flash_v2","eleven_multilingual_v2","eleven_multilingual_v1","eleven_english_sts_v2","eleven_english_sts_v1"],
"help": "The ID of the model to use for text-to-speech generation. Example: 'eleven_turbo_v2_5'."
},
{
"name": "voice_name",
"type": "str",
"value": "Sarah",
"help": "The name of the voice to use for text-to-speech generation. Example: 'Sarah'."
},
{
"name": "language",
"type": "str",
"value": "en",
"options": ["en", "ja", "zh", "de", "hi", "fr", "ko", "pt", "it", "es", "id", "nl", "tr", "fil", "pl", "sv", "bg", "ro", "ar", "cs", "el", "fi", "hr", "ms", "sk", "da", "ta", "uk", "ru", "hu", "no", "vi"], # Dynamically populated based on the selected model_id
"help": "The language to use for text-to-speech generation. Supported languages depend on the selected model."
},
{
"name": "api_key",
"type": "str",
"value": api_key,
"help": "A valid API key for accessing the Eleven Labs service."
},
{
"name": "similarity_boost",
"type": "bool",
"value": False,
"help": "If enabled, increases the similarity of the generated speech to the selected voice."
},
{
"name": "streaming",
"type": "bool",
"value": False,
"help": "If enabled, the text-to-speech output will be streamed in real-time instead of being generated all at once."
}
]
),
BaseConfig(config={
"api_key": "", # use avx2
})
)
super().__init__("elevenlabs_tts", app, service_config, output_folder)
self.ready = True
self.voices = []
self.voice_id_map = {}
try:
self._fetch_voices()
self.voice_id = self._get_voice_id(service_config.voice_name)
except:
pass
def settings_updated(self):
pass
def _fetch_voices(self):
url = "https://api.elevenlabs.io/v1/voices"
headers = {"xi-api-key": self.service_config.api_key}
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
data = response.json()
for voice in data.get("voices", []):
name = voice.get("name")
voice_id = voice.get("voice_id")
if name and voice_id:
self.voices.append(name)
self.voice_id_map[name] = voice_id
except requests.RequestException as e:
print(f"Error fetching voices: {e}")
# Fallback to default voice
self.voices = ["Sarah"]
self.voice_id_map = {"Sarah": "EXAVITQu4vr4xnSDxMaL"}
def _get_voice_id(self, voice_name: str) -> str:
return self.voice_id_map.get(voice_name, "EXAVITQu4vr4xnSDxMaL") # Default to Sarah if not found
def set_voice(self, voice_name: str):
if voice_name in self.voices:
self.service_config.voice_name = voice_name
self.voice_id = self._get_voice_id(voice_name)
else:
raise ValueError(f"Voice '{voice_name}' not found. Available voices: {', '.join(self.voices)}")
def tts_file(self, text, file_name_or_path: Path | str = None, speaker=None, language="en", use_threading=False):
speech_file_path = file_name_or_path
payload = {
"text": text,
"language_code": language,
"model_id": self.service_config.model_id,
"voice_settings": {
"stability": self.service_config.stability,
"similarity_boost": self.service_config.similarity_boost
}
}
headers = {
"xi-api-key": self.service_config.api_key,
"Content-Type": "application/json"
}
if self.service_config.streaming:
url = f"https://api.elevenlabs.io/v1/text-to-speech/{self.voice_id}/stream"
response = requests.post(url, json=payload, headers=headers)
# Handle streaming response if needed
else:
url = f"https://api.elevenlabs.io/v1/text-to-speech/{self.voice_id}"
response = requests.post(url, json=payload, headers=headers)
if response.status_code==400:
del payload["language_code"]
url = f"https://api.elevenlabs.io/v1/text-to-speech/{self.voice_id}"
response = requests.post(url, json=payload, headers=headers)
with open(speech_file_path, 'wb') as f:
f.write(response.content)
return speech_file_path
def tts_audio(self, text, speaker: str = None, file_name_or_path: Path | str = None, language="en", use_threading=False):
speech_file_path = file_name_or_path
payload = {
"text": text,
"language_code": language,
"model_id": self.service_config.model_id,
"voice_settings": {
"stability": self.service_config.stability,
"similarity_boost": self.service_config.similarity_boost
}
}
headers = {
"xi-api-key": self.service_config.api_key,
"Content-Type": "application/json"
}
if self.service_config.streaming:
url = f"https://api.elevenlabs.io/v1/text-to-speech/{self.voice_id}/stream"
response = requests.post(url, json=payload, headers=headers)
# Handle streaming response if needed
else:
url = f"https://api.elevenlabs.io/v1/text-to-speech/{self.voice_id}"
response = requests.post(url, json=payload, headers=headers)
if response.status_code == 200:
with open(speech_file_path, 'wb') as f:
f.write(response.content)
else:
self.app.error(f"Couldn't generate speech, {response.reason}")
def play_audio(file_path):
# Read the audio file
data, fs = sf.read(file_path, dtype='float32')
# Play the audio file
sd.play(data, fs)
# Wait until the file is done playing
sd.wait()
# Example usage
play_audio(speech_file_path)

View File

@ -1,147 +0,0 @@
from pathlib import Path
from typing import List, Dict, Any
import httpx
from pydantic import BaseModel
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths
from lollms.tts import LollmsTTS
from lollms.utilities import PackageManager, find_next_available_filename
from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
import pipmaster as pm
import os
if not pm.is_installed("sounddevice"):
pm.install("sounddevice")
if not pm.is_installed("soundfile"):
pm.install("soundfile")
if not pm.is_installed("ormsgpack"):
pm.install("ormsgpack")
import ormsgpack
import sounddevice as sd
import soundfile as sf
class ServeReferenceAudio(BaseModel):
audio: bytes
text: str
class ServeTTSRequest(BaseModel):
text: str
chunk_length: int = 200
format: str = "mp3"
mp3_bitrate: int = 128
references: List[ServeReferenceAudio] = []
reference_id: str | None = None
normalize: bool = True
latency: str = "normal"
def get_FishAudioTTS(lollms_paths: LollmsPaths):
return LollmsFishAudioTTS
class LollmsFishAudioTTS(LollmsTTS):
def __init__(
self,
app: LollmsApplication,
output_folder: Path | str = None,
):
"""
Initializes the LollmsFishAudioTTS binding.
Args:
api_key (str): The API key for authentication.
output_folder (Path|str): The output folder where to put the generated data
"""
# Check for the FISHTTS_KEY environment variable if no API key is provided
api_key = os.getenv("FISHTTS_KEY","")
service_config = TypedConfig(
ConfigTemplate([
{"name":"voice_name", "type":"str", "value":"default", "help":"A valid model id"},
{"name":"api_key", "type":"str", "value":api_key, "help":"A valid eleven labs key"},
{"name":"similarity_boost", "type":"bool", "value":False, "help":"A valid model id"},
{"name":"streaming", "type":"bool", "value":False, "help":"A valid model id"},
]),
BaseConfig(config={
"api_key": "", # use avx2
})
)
super().__init__("fishaudio_tts", app, service_config, output_folder)
self.output_folder = output_folder
self.reference_folder = app.lollms_paths.custom_voices_path/"fish_tts"
self.reference_folder.mkdir(exist_ok=True, parents=True)
self.voices = self._load_voices()
self.ready = True
def _load_voices(self) -> List[str]:
if not self.reference_folder or not self.reference_folder.exists():
return ["default"]
voices = []
for audio_file in self.reference_folder.glob("*.mp3"):
text_file = audio_file.with_suffix(".txt")
if text_file.exists():
voices.append(audio_file.stem)
return voices or ["default"]
def set_voice(self, voice_name: str):
if voice_name in self.voices:
self.voice_name = voice_name
else:
raise ValueError(f"Voice '{voice_name}' not found. Available voices: {', '.join(self.voices)}")
def _get_reference_audio(self, voice_name: str) -> ServeReferenceAudio | None:
if voice_name == "default":
return None
audio_file = self.reference_folder / f"{voice_name}.mp3"
text_file = self.reference_folder / f"{voice_name}.txt"
if audio_file.exists() and text_file.exists():
return ServeReferenceAudio(
audio=audio_file.read_bytes(),
text=text_file.read_text()
)
return None
def tts_file(self, text, file_name_or_path: Path | str = None, speaker=None, language="en", use_threading=False):
speech_file_path = Path(file_name_or_path) if file_name_or_path else self._get_output_path("mp3")
reference = self._get_reference_audio(speaker)
request = ServeTTSRequest(
text=text,
references=[reference] if reference else []
)
with httpx.Client() as client:
with client.stream(
"POST",
"https://api.fish.audio/v1/tts",
content=ormsgpack.packb(request, option=ormsgpack.OPT_SERIALIZE_PYDANTIC),
headers={
"authorization": f"Bearer {self.service_config.api_key}",
"content-type": "application/msgpack",
},
timeout=None,
) as response:
with open(speech_file_path, "wb") as f:
for chunk in response.iter_bytes():
f.write(chunk)
return speech_file_path
def tts_audio(self, text, speaker: str = None, file_name_or_path: Path | str = None, language="en", use_threading=False):
speech_file_path = self.tts_file(text, file_name_or_path, speaker, language, use_threading)
def play_audio(file_path):
data, fs = sf.read(file_path, dtype='float32')
sd.play(data, fs)
sd.wait()
play_audio(speech_file_path)
def _get_output_path(self, extension: str) -> Path:
if self.output_path:
return find_next_available_filename(self.output_path, f"output.{extension}")
return find_next_available_filename(Path.cwd(), f"output.{extension}")

View File

@@ -1,150 +0,0 @@
# Title LollmsOpenAITTS
# Licence: MIT
# Author : Paris Neo
# Uses the OpenAI API to perform text to speech
#
from pathlib import Path
import sys
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths
from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
import time
import io
import sys
import requests
import os
import base64
import subprocess
import time
import json
import platform
from dataclasses import dataclass
from PIL import Image, PngImagePlugin
from enum import Enum
from typing import List, Dict, Any
from ascii_colors import ASCIIColors, trace_exception
from lollms.paths import LollmsPaths
from lollms.utilities import PackageManager, find_next_available_filename
from lollms.tts import LollmsTTS
import subprocess
import shutil
from pathlib import Path
from tqdm import tqdm
import threading
from io import BytesIO
import pipmaster as pm
if not pm.is_installed("openai"):
pm.install("openai")
from openai import OpenAI
if not pm.is_installed("sounddevice"):
pm.install("sounddevice")
if not pm.is_installed("soundfile"):
pm.install("soundfile")
import sounddevice as sd
import soundfile as sf
def get_OpenAITTS(lollms_paths:LollmsPaths):
return LollmsOpenAITTS
class LollmsOpenAITTS(LollmsTTS):
def __init__(
self,
app:LollmsApplication,
output_folder:str|Path=None
):
"""
Initializes the LollmsOpenAITTS binding.
Args:
api_key (str): The API key for authentication.
output_folder (Path|str): The output folder where to put the generated data
"""
# Check for the OPENAI_KEY environment variable if no API key is provided
api_key = os.getenv("OPENAI_KEY","")
service_config = TypedConfig(
ConfigTemplate([
{
"name": "model",
"type": "str",
"value": "tts-1",
"options": ["alloy", "echo", "fable", "nova", "shimmer"],
"help": "The model to use for text-to-speech. Options: 'alloy', 'echo', 'fable', 'nova', 'shimmer'."
},
{
"name": "voice",
"type": "str",
"value": "alloy",
"help": "The voice to use for text-to-speech. Options: 'alloy', 'echo', 'fable', 'nova', 'shimmer'."
},
{
"name": "api_key",
"type": "str",
"value": api_key,
"help": "A valid API key for accessing the text-to-speech service."
},
]),
BaseConfig(config={
"api_key": "", # use avx2
})
)
super().__init__("openai_tts", app, service_config, output_folder)
self.client = OpenAI(api_key=self.service_config.api_key)
self.output_folder = output_folder
self.ready = True
def settings_updated(self):
self.client = OpenAI(api_key=self.service_config.api_key)
def tts_file(self, text, speaker, file_name_or_path, language="en"):
speech_file_path = file_name_or_path
response = self.client.audio.speech.create(
model=self.service_config.model,
voice=self.service_config.voice,
input=text,
response_format="wav"
)
response.write_to_file(speech_file_path)
def tts_audio(self, text, speaker:str=None, file_name_or_path:Path|str=None, language="en", use_threading=False):
speech_file_path = file_name_or_path
response = self.client.audio.speech.create(
model=self.service_config.model,
voice=self.service_config.voice if speaker is None else speaker,
input=text,
response_format="wav"
)
response.write_to_file(speech_file_path)
def play_audio(file_path):
# Read the audio file
data, fs = sf.read(file_path, dtype='float32')
# Play the audio file
sd.play(data, fs)
# Wait until the file is done playing
sd.wait()
# Example usage
play_audio(speech_file_path)
def tts_file(self, text, speaker=None, file_name_or_path:Path|str=None, language="en", use_threading=False):
speech_file_path = file_name_or_path
text = self.clean_text(text)
response = self.client.audio.speech.create(
model=self.service_config.model,
voice=self.service_config.voice,
input=text,
response_format="wav"
)
response.write_to_file(speech_file_path)
return file_name_or_path
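A usage sketch of the removed binding, assuming app is a configured LollmsApplication, a valid OPENAI_KEY in the environment, and the clean_text helper from the LollmsTTS base class:

# Synthesize speech to a wav file through the OpenAI TTS API (sketch).
tts = LollmsOpenAITTS(app, output_folder="outputs")
tts.tts_file("Hello from lollms", file_name_or_path="hello.wav")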

View File

@@ -1 +0,0 @@
xtts_models

View File

@@ -1,314 +0,0 @@
"""
project: lollms_tts
file: lollms_tts.py
author: ParisNeo
description:
This file hosts the LollmsXTTS service which provides text-to-speech functionalities using the TTS library.
"""
from pathlib import Path
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths
from lollms.utilities import PackageManager, find_first_available_file_index, add_period
from ascii_colors import ASCIIColors, trace_exception
from lollms.tts import LollmsTTS
from lollms.utilities import run_pip_in_env
from typing import List
import threading
from packaging import version
import pipmaster as pm
if version.parse(str(pm.get_installed_version("numpy"))) > version.parse(str("1.26.9")):
pm.install_version("numpy", "1.26.4")
if not pm.is_installed("pydub"):
pm.install("pydub")
import numpy as np
# Ensure required packages are installed
if not pm.is_installed("TTS"):
pm.install("TTS")
if not pm.is_installed("simpleaudio"):
pm.install("simpleaudio")
if not pm.is_installed("wave"):
pm.install("wave")
import re
from pathlib import Path
from pydub import AudioSegment
import wave
from TTS.api import TTS
import simpleaudio as sa
import time
from queue import Queue
import re
import pipmaster as pm
from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
# List of common sampling rates
common_sampling_rates = [8000, 11025, 16000, 22050, 32000, 44100, 48000, 96000, 192000]
# Function to find the closest sampling rate
def closest_sampling_rate(freq, common_rates):
return min(common_rates, key=lambda x: abs(x - freq))
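A quick sanity check of the helper above:

# 23000 Hz is not a common rate; the nearest common one is 22050 Hz.
assert closest_sampling_rate(23000, common_sampling_rates) == 22050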
def xtts_install():
pm.install_or_update("tts", force_reinstall=True)
class LollmsXTTS(LollmsTTS):
def __init__(
self,
app:LollmsApplication,
output_folder:str|Path=None
):
"""
Initializes the LollmsXTTS binding.
Args:
api_key (str): The API key for authentication.
output_folder (Path|str): The output folder where to put the generated data
"""
service_config = TypedConfig(
ConfigTemplate([
{
"name": "model",
"type": "str",
"value": "",
"options": [],
"help": "The model to use for text-to-speech. Options: 'alloy', 'echo', 'fable', 'nova', 'shimmer'."
},
{
"name": "voice",
"type": "str",
"value": "alloy",
"help": "The voice to use for text-to-speech. Options: 'alloy', 'echo', 'fable', 'nova', 'shimmer'."
},
]),
BaseConfig(config={
"api_key": "", # use avx2
})
)
super().__init__("lollms_xtts", app, service_config, output_folder)
voices_folder = app.lollms_paths.custom_voices_path/"xtts"
voices_folder.mkdir(exist_ok=True, parents=True)
self.voices_folders = [voices_folder] + [Path(__file__).parent/"voices"]
voices = self.get_voices()
service_config.config_template["model"]["options"]=voices
def settings_updated(self):
voices = self.get_voices()
self.service_config.config_template["model"]["options"]=voices
def __init__(self, app: LollmsApplication, voices_folders: List[str|Path], freq = 22050):
super().__init__("lollms_xtts", app)
self.freq = freq
self.generation_threads = {}
self.stop_event = threading.Event()
# Show a cool LOGO using ASCIIColors
ASCIIColors.red("")
ASCIIColors.red(" __ ___ __ __ __ __ ___ _ ")
ASCIIColors.red(" / / /___\/ / / / /\/\ / _\ \ \/ / |_| |_ ___ ")
ASCIIColors.red(" / / // // / / / / \ \ \ _____\ /| __| __/ __| ")
ASCIIColors.red("/ /___/ \_// /___/ /___/ /\/\ \_\ \_____/ \| |_| |_\__ \ ")
ASCIIColors.red("\____/\___/\____/\____/\/ \/\__/ /_/\_\\__|\__|___/ ")
# Load the TTS model
self.tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2")
self.tts.to("cuda")
self.wav_queue = Queue()
self.play_obj = None
self.thread = None
self.ready = True
def install(lollms_app: LollmsApplication):
ASCIIColors.green("LollmsXTTS installation started")
# Here you can perform installation of needed things, or create configuration files or download needed assets etc.
run_pip_in_env("TTS")
run_pip_in_env("simpleaudio")
@staticmethod
def verify(lollms_paths: LollmsPaths) -> bool:
# Verify that the service is installed either by verifying the libraries are installed or that some files or folders exist
try:
import TTS
import simpleaudio
return True
except ImportError:
return False
@staticmethod
def get(app: LollmsApplication) -> 'LollmsXTTS':
# Verify if the service is installed and if true then return an instance of LollmsXTTS
if LollmsXTTS.verify(app.lollms_paths):
return LollmsXTTS(app, app.lollms_paths.custom_voices_path, freq=app.config.xtts_freq)
else:
raise Exception("LollmsXTTS service is not installed properly.")
def get_speaker_wav(self, speaker) -> Path:
"""
Searches for the speaker file in the specified folders.
:param speaker: The name of the speaker file (without extension).
:return: The path to the speaker file if found.
:raises FileNotFoundError: If the speaker file is not found in any of the folders.
"""
for folder in self.voices_folders:
potential_speaker_wav = Path(folder) / f"{speaker}.wav"
if potential_speaker_wav.exists():
return potential_speaker_wav
raise FileNotFoundError(f"Speaker file '{speaker}.wav' not found in any of the specified folders.")
def tts_file(self, text, file_name_or_path, speaker=None, language="en") -> str:
speaker_wav = None
if speaker:
speaker_wav = self.get_speaker_wav(speaker)
else:
speaker_wav = self.get_speaker_wav("main_voice")
# Split the text into sentences
sentences = re.split(r'(?<=[.!?])\s+', text)
# Initialize an empty list to store audio segments
audio_segments = []
# Process sentences in chunks of less than 400 tokens
chunk = []
chunk_tokens = 0
output_path = Path(file_name_or_path)
for i, sentence in enumerate(sentences):
sentence_tokens = len(sentence.split())
if chunk_tokens + sentence_tokens > 400:
# Process the current chunk
chunk_text = " ".join(chunk)
temp_file = output_path.with_suffix(f".temp{i}.wav")
self.tts.tts_to_file(text=chunk_text, file_path=str(temp_file), speaker_wav=speaker_wav, language=language)
audio_segments.append(AudioSegment.from_wav(str(temp_file)))
# Reset the chunk
chunk = [sentence]
chunk_tokens = sentence_tokens
else:
chunk.append(sentence)
chunk_tokens += sentence_tokens
# Process the last chunk if it's not empty
if chunk:
chunk_text = " ".join(chunk)
temp_file = output_path.with_suffix(f".temp{len(sentences)}.wav")
self.tts.tts_to_file(text=chunk_text, file_path=str(temp_file), speaker_wav=speaker_wav, language=language)
audio_segments.append(AudioSegment.from_wav(str(temp_file)))
# Combine all audio segments
combined_audio = sum(audio_segments)
# Export the combined audio to the final file
combined_audio.export(file_name_or_path, format="wav")
# Clean up temporary files
for temp_file in output_path.parent.glob(f"{output_path.stem}.temp*.wav"):
temp_file.unlink()
return file_name_or_path
def tts_audio(self, text, speaker=None, file_name_or_path: Path | str | None = None, language="en", use_threading=False):
# Split text into sentences
sentences = re.split(r'(?<=[.!?]) +', text)
if speaker:
speaker_wav = self.get_speaker_wav(speaker)
else:
speaker_wav = self.get_speaker_wav("main_voice")
if use_threading:
self.stop_event.clear()
generator_thread = threading.Thread(target=self._generate_audio, args=(sentences, speaker_wav, language, file_name_or_path))
generator_thread.start()
self.thread = threading.Thread(target=self._play_audio)
self.thread.start()
else:
self.stop_event.clear()
generator_thread = threading.Thread(target=self._generate_audio, args=(sentences, speaker_wav, language, file_name_or_path))
generator_thread.start()
self._play_audio()
def _generate_audio(self, sentences, speaker_wav, language, file_name_or_path):
wav_data = []
for sentence in sentences:
if self.stop_event.is_set():
break
wav = self.tts.tts(text=sentence, speaker_wav=speaker_wav, language=language)
wav_array = np.array(wav, dtype=np.float32)
wav_array = np.int16(wav_array * 32767)
self.wav_queue.put(wav_array)
wav_data.append(wav_array)
self.wav_queue.put(None) # Signal that generation is done
if file_name_or_path:
self._save_wav(wav_data, file_name_or_path)
def _play_audio(self):
buffered_sentences = 0
buffer = []
while not self.stop_event.is_set():
wav = self.wav_queue.get()
if wav is None:
# Play any remaining buffered sentences
for buffered_wav in buffer:
# Find the closest sampling rate
closest_freq = closest_sampling_rate(self.freq, common_sampling_rates)
self.play_obj = sa.play_buffer(buffered_wav.tobytes(), 1, 2, closest_freq)
self.play_obj.wait_done()
time.sleep(0.5) # Pause between sentences
ASCIIColors.green("Audio done")
break
buffer.append(wav)
buffered_sentences += 1
if buffered_sentences >= 2:
for buffered_wav in buffer:
closest_freq = closest_sampling_rate(self.freq, common_sampling_rates)
self.play_obj = sa.play_buffer(buffered_wav.tobytes(), 1, 2, closest_freq)
self.play_obj.wait_done()
time.sleep(0.5) # Pause between sentences
buffer = []
buffered_sentences = 0
def _save_wav(self, wav_data, file_name_or_path):
with wave.open(str(file_name_or_path), 'wb') as wf:
wf.setnchannels(1)
wf.setsampwidth(2)
wf.setframerate(self.freq)
for wav in wav_data:
wf.writeframes(wav.tobytes())
def stop(self):
self.stop_event.set()
if self.thread and self.thread.is_alive():
self.thread.join()
if self.play_obj:
self.play_obj.stop()
def get_voices(self):
# List voices from the folder
ASCIIColors.yellow("Listing voices")
voices = []
for voices_folder in self.voices_folders:
voices += [v.stem for v in voices_folder.iterdir() if v.suffix == ".wav"]
return voices
if __name__ == "__main__":
# Here do some example
app = LollmsApplication()
lollms_xtts_service = LollmsXTTS.get(app)
lollms_xtts_service.tts_file("Hello, this is a test.", "output.wav", speaker="ParisNeo_Original_voice", language="en")

File diff suppressed because one or more lines are too long

View File

@@ -1 +0,0 @@
models.txt

View File

@@ -1,137 +0,0 @@
# Title Ollama service
# Licence: MIT
# Author : Paris Neo
# This is a service launcher for the ollama server by Jeffrey Morgan (jmorganca)
# check it out : https://github.com/jmorganca/ollama
# Here is a copy of the LICENCE https://github.com/jmorganca/ollama/blob/main/LICENSE
# All rights are reserved
from pathlib import Path
import sys
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths
from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
import time
import io
import sys
import requests
import os
import base64
import subprocess
import time
import json
import platform
from dataclasses import dataclass
from PIL import Image, PngImagePlugin
from enum import Enum
from typing import List, Dict, Any
from ascii_colors import ASCIIColors, trace_exception
from lollms.paths import LollmsPaths
from lollms.utilities import git_pull, show_yes_no_dialog
import subprocess
import platform
def verify_ollama(lollms_paths:LollmsPaths):
# Clone repository
root_dir = lollms_paths.personal_path
shared_folder = root_dir/"shared"
sd_folder = shared_folder / "auto_sd"
return sd_folder.exists()
def install_ollama(lollms_app:LollmsApplication):
import platform
import os
import shutil
import urllib.request
import subprocess
from pathlib import Path
if show_yes_no_dialog("Info","You have asked to download and install ollama on your system.\nOllama is a separate tool that servs a variaty of llms and lollms can use it as one of its bindings.\nIf you already have it installed, you can press No.\nYou can install it manually from their webite ollama.com.\nPress yes If you want to install it automatically now.\n"):
system = platform.system()
download_folder = Path.home() / "Downloads"
if system == "Windows":
url = "https://ollama.com/download/OllamaSetup.exe"
filename = "OllamaSetup.exe"
urllib.request.urlretrieve(url, download_folder / filename)
install_process = subprocess.Popen([str(download_folder / filename)])
install_process.wait()
elif system == "Linux":
url = "https://ollama.com/install.sh"
filename = "install.sh"
urllib.request.urlretrieve(url, download_folder / filename)
os.chmod(download_folder / filename, 0o755)
install_process = subprocess.Popen([str(download_folder / filename)])
install_process.wait()
elif system == "Darwin":
url = "https://ollama.com/download/Ollama-darwin.zip"
filename = "Ollama-darwin.zip"
urllib.request.urlretrieve(url, download_folder / filename)
shutil.unpack_archive(download_folder / filename, extract_dir=download_folder)
install_process = subprocess.Popen([str(download_folder / "Ollama-darwin" / "install.sh")])
install_process.wait()
else:
print("Unsupported operating system.")
class Service:
def __init__(
self,
app:LollmsApplication,
base_url="http://127.0.0.1:11434",
wait_max_retries = 5,
wait_for_service=True
):
self.base_url = base_url
# Get the current directory
lollms_paths = app.lollms_paths
self.app = app
root_dir = lollms_paths.personal_path
ASCIIColors.red(" __ _____ __ __ _____ _____ _____ __ __ _____ _____ _____ ")
ASCIIColors.red("| | | | | | | | | __| | | | | | | _ | | _ |")
ASCIIColors.red("| |__| | | |__| |__| | | |__ | | | | |__| |__| | | | | |")
ASCIIColors.red("|_____|_____|_____|_____|_|_|_|_____|_____|_____|_____|_____|__|__|_|_|_|__|__|")
ASCIIColors.red(" |_____| ")
ASCIIColors.red(" Launching ollama service by Jeffrey Morgan (jmorganca)")
ASCIIColors.red(" Integration in lollms by ParisNeo")
if not self.wait_for_service(1,False) and base_url is None:
ASCIIColors.info("Loading ollama service")
# Wait until the service is available at http://127.0.0.1:7860/
if wait_for_service:
self.wait_for_service(max_retries=wait_max_retries)
else:
ASCIIColors.warning("We are not waiting for the OLLAMA service to be up.\nThis means that you may need to wait a bit before you can use it.")
def wait_for_service(self, max_retries = 150, show_warning=True):
url = f"{self.base_url}"
# Adjust this value as needed
retries = 0
while retries < max_retries or max_retries<0:
try:
response = requests.get(url)
if response.status_code == 200:
print("Service is available.")
if self.app is not None:
self.app.success("Ollama Service is now available.")
return True
except requests.exceptions.RequestException:
pass
retries += 1
time.sleep(1)
if show_warning:
print("Service did not become available within the given time.\nThis may be a normal behavior as it depends on your system performance. Maybe you should wait a little more before using the ollama client as it is not ready yet\n")
if self.app is not None:
self.app.error("Ollama Service did not become available within the given time.")
return False

View File

@@ -1,27 +0,0 @@
#!/bin/bash
# Check if miniconda3/bin/conda exists
if [ -e "$HOME/miniconda3/bin/conda" ]; then
echo "Conda is installed!"
else
echo "Conda is not installed. Please install it first."
echo Installing conda
curl -LO https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
bash ./Miniconda3-latest-Linux-x86_64.sh -b
rm ./Miniconda3-latest-Linux-x86_64.sh
echo Done
fi
PATH="$HOME/miniconda3/bin:$PATH"
export PATH
echo "Initializing conda"
conda init --all
export PATH
echo "Installing petals"
conda create -n petals python=3.9 -y
echo "Activating petals environment"
source activate petals
pip install petals
git clone https://github.com/ParisNeo/petals_server.git
cd petals_server
pip install -e .
echo "Done"

View File

@@ -1,133 +0,0 @@
# Title petals service
# Licence: MIT
# Author : Paris Neo
# This is a service launcher for the petals server by BigScience
# check it out : https://github.com/bigscience-workshop/petals
# Here is a copy of the LICENCE https://github.com/bigscience-workshop/petals/blob/main/LICENSE
# All rights are reserved
from pathlib import Path
import os
import sys
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths
from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
import time
import io
import sys
import requests
import os
import base64
import subprocess
import time
import json
import platform
from dataclasses import dataclass
from PIL import Image, PngImagePlugin
from enum import Enum
from typing import List, Dict, Any
from ascii_colors import ASCIIColors, trace_exception
from lollms.paths import LollmsPaths
from lollms.utilities import git_pull, show_yes_no_dialog
import subprocess
import platform
def verify_petals(lollms_paths:LollmsPaths):
# Clone repository
root_dir = lollms_paths.personal_path
shared_folder = root_dir/"shared"
sd_folder = shared_folder / "auto_sd"
return sd_folder.exists()
def install_petals(lollms_app:LollmsApplication):
if platform.system() == 'Windows':
root_path = "/mnt/"+"".join(str(Path(__file__).parent).replace("\\","/").split(":"))
if not os.path.exists('C:\\Windows\\System32\\wsl.exe'):
if not show_yes_no_dialog("warning!","No WSL is detected on your system. Do you want me to install it for you? petals won't be abble to work without wsl."):
return False
subprocess.run(['wsl', '--install', 'Ubuntu'])
subprocess.run(['wsl', 'bash', '-c', 'cp {} ~'.format( root_path + '/install_petals.sh')])
subprocess.run(['wsl', 'bash', '-c', 'cp {} ~'.format( root_path + '/run_petals.sh')])
subprocess.run(['wsl', 'bash', '~/install_petals.sh'])
else:
root_path = str(Path(__file__).parent)
home = Path.home()
subprocess.run(['cp', root_path + '/install_petals.sh', str(home)])
subprocess.run(['cp', root_path + '/run_petals.sh', str(home)])
subprocess.run(['bash', f'{home}/install_petals.sh'])
return True
def get_petals(lollms_app:LollmsApplication):
if verify_petals(lollms_app.lollms_paths):
ASCIIColors.success("lollms_vllm found.")
ASCIIColors.success("Loading source file...",end="")
# use importlib to load the module from the file path
ASCIIColors.success("ok")
return Service
else:
return None
class Service:
def __init__(
self,
app:LollmsApplication,
base_url="http://127.0.0.1:11434",
wait_max_retries = 5
):
self.base_url = base_url
# Get the current directory
lollms_paths = app.lollms_paths
self.app = app
root_dir = lollms_paths.personal_path
ASCIIColors.red(" __ _____ __ __ _____ _____ _____ __ __ _____ _____ _____ ")
ASCIIColors.red("| | | | | | | | | __| | | | | | | _ | | _ |")
ASCIIColors.red("| |__| | | |__| |__| | | |__ | | | | |__| |__| | | | | |")
ASCIIColors.red("|_____|_____|_____|_____|_|_|_|_____|_____|_____|_____|_____|__|__|_|_|_|__|__|")
ASCIIColors.red(" |_____| ")
ASCIIColors.red(" Launching petals service by big science")
ASCIIColors.red(" Integration in lollms by ParisNeo")
if not self.wait_for_service(1,False) and base_url is None:
ASCIIColors.info("Loading petals service")
# run petals
if platform.system() == 'Windows':
subprocess.Popen(['wsl', 'bash', '~/run_petals.sh'])
else:
subprocess.Popen(['bash', f'{Path.home()}/run_petals.sh'])
# Wait until the service is available at http://127.0.0.1:7860/
self.wait_for_service(max_retries=wait_max_retries)
def wait_for_service(self, max_retries = 150, show_warning=True):
url = f"{self.base_url}"
# Adjust this value as needed
retries = 0
while retries < max_retries or max_retries<0:
try:
response = requests.get(url)
if response.status_code == 200:
print("Service is available.")
if self.app is not None:
self.app.success("petals Service is now available.")
return True
except requests.exceptions.RequestException:
pass
retries += 1
time.sleep(1)
if show_warning:
print("Service did not become available within the given time.")
if self.app is not None:
self.app.error("petals Service did not become available within the given time.")
return False

View File

@@ -1,8 +0,0 @@
#!/bin/bash
PATH="$HOME/miniconda3/bin:$PATH"
export PATH
conda activate vllm && python -m vllm.entrypoints.openai.api_server --model_name "$1" --node_name "$2" --device "$3"
# Wait for all background processes to finish
wait

View File

@@ -1 +0,0 @@
models.txt

View File

@@ -1,113 +0,0 @@
# Title tgi service
# Licence: MIT
# Author : Paris Neo
# This is a service launcher for the text-generation-inference (TGI) server by Hugging Face
# check it out : https://github.com/huggingface/text-generation-inference
# Here is a copy of the LICENCE https://github.com/huggingface/text-generation-inference/blob/main/LICENSE
# All rights are reserved
from pathlib import Path
import sys
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths
from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
import time
import io
import sys
import requests
import os
import base64
import subprocess
import time
import json
import platform
from dataclasses import dataclass
from PIL import Image, PngImagePlugin
from enum import Enum
from typing import List, Dict, Any
from ascii_colors import ASCIIColors, trace_exception
from lollms.paths import LollmsPaths
from lollms.utilities import git_pull
import subprocess
import platform
import shutil
def verify_tgi(lollms_paths:LollmsPaths):
# Check whether the tgi shared folder exists
root_dir = lollms_paths.personal_path
shared_folder = root_dir/"shared"
tgi_folder = shared_folder / "tgi"
return tgi_folder.exists()
def install_tgi(lollms_app:LollmsApplication):
root_dir = lollms_app.lollms_paths.personal_path
shared_folder = root_dir/"shared"
tgi_folder = shared_folder / "tgi"
subprocess.run(["", "https://github.com/ParisNeo/stable-diffusion-webui.git", str(tgi_folder)])
subprocess.run(["git", "clone", "https://github.com/ParisNeo/stable-diffusion-webui.git", str(tgi_folder)])
return True
class Service:
def __init__(
self,
app:LollmsApplication,
base_url="http://127.0.0.1:11434",
wait_max_retries = 5
):
self.base_url = base_url
# Get the current directory
lollms_paths = app.lollms_paths
self.app = app
root_dir = lollms_paths.personal_path
ASCIIColors.red(" _ _ _ __ __ _____ _______ _____ _____ ")
ASCIIColors.red(" | | | | | | | \/ |/ ____|__ __/ ____|_ _|")
ASCIIColors.red(" | | ___ | | | | | \ / | (___ | | | | __ | | ")
ASCIIColors.red(" | | / _ \| | | | | |\/| |\___ \ | | | | |_ | | | ")
ASCIIColors.red(" | |___| (_) | |____| |____| | | |____) | | | | |__| |_| |_ ")
ASCIIColors.red(" |______\___/|______|______|_| |_|_____/ |_| \_____|_____|")
ASCIIColors.red(" ______ ")
ASCIIColors.red(" |______| ")
ASCIIColors.red(" Launching tgi service by Hugging face")
ASCIIColors.red(" Integration in lollms by ParisNeo")
if not self.wait_for_service(1,False) and base_url is None:
ASCIIColors.info("Loading tgi service")
# run tgi
if platform.system() == 'Windows':
subprocess.Popen(['wsl', 'bash', '~/run_tgi.sh'])
else:
subprocess.Popen(['bash', f'{Path.home()}/run_tgi.sh'])
# Wait until the service is available at base_url
self.wait_for_service(max_retries=wait_max_retries)
def wait_for_service(self, max_retries = 150, show_warning=True):
url = f"{self.base_url}"
# Adjust this value as needed
retries = 0
while retries < max_retries or max_retries<0:
try:
response = requests.get(url)
if response.status_code == 200:
print("Service is available.")
if self.app is not None:
self.app.success("tgi Service is now available.")
return True
except requests.exceptions.RequestException:
pass
retries += 1
time.sleep(1)
if show_warning:
print("Service did not become available within the given time.")
if self.app is not None:
self.app.error("tgi Service did not become available within the given time.")
return False
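# The same polling loop is duplicated in each service's wait_for_service; a
# reusable helper could look like this (a sketch, not part of lollms;
# wait_for_http is a hypothetical name).
import time
import requests

def wait_for_http(url: str, max_retries: int = 150, delay: float = 1.0) -> bool:
    # Poll url until it answers HTTP 200; a negative max_retries retries forever.
    retries = 0
    while retries < max_retries or max_retries < 0:
        try:
            if requests.get(url).status_code == 200:
                return True
        except requests.exceptions.RequestException:
            pass
        retries += 1
        time.sleep(delay)
    return False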

View File

@@ -1 +0,0 @@
models.txt

View File

@@ -1,24 +0,0 @@
#!/bin/bash
# Check if miniconda3/bin/conda exists
if [ -e "$HOME/miniconda3/bin/conda" ]; then
echo "Conda is installed!"
else
echo "Conda is not installed. Please install it first."
echo Installing conda
curl -LO https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
bash ./Miniconda3-latest-Linux-x86_64.sh -b
rm ./Miniconda3-latest-Linux-x86_64.sh
echo Done
fi
PATH="$HOME/miniconda3/bin:$PATH"
export PATH
echo "Initializing conda"
conda init --all
export PATH
echo "Installing vllm"
conda create -n vllm python=3.9 -y
echo "Activating vllm environment"
source activate vllm
pip install vllm
echo "Done"

View File

@@ -1,144 +0,0 @@
# Title vLLM service
# Licence: MIT
# Author : Paris Neo
# This is a service launcher for the vLLM server by the vLLM team
# check it out : https://github.com/vllm-project/vllm
# Here is a copy of the LICENCE https://github.com/vllm-project/vllm/blob/main/LICENSE
# All rights are reserved
from pathlib import Path
import sys
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths
from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
from lollms.utilities import url2host_port
import time
import io
import sys
import requests
import os
import base64
import subprocess
import time
import json
import platform
from dataclasses import dataclass
from PIL import Image, PngImagePlugin
from enum import Enum
from typing import List, Dict, Any
from ascii_colors import ASCIIColors, trace_exception
from lollms.paths import LollmsPaths
from lollms.utilities import git_pull, show_yes_no_dialog
import subprocess
import platform
def verify_vllm(lollms_paths:LollmsPaths):
# Check whether the vllm shared folder exists
root_dir = lollms_paths.personal_path
shared_folder = root_dir/"shared"
vllm_folder = shared_folder / "vllm"
return vllm_folder.exists()
def install_vllm(lollms_app:LollmsApplication):
if platform.system() == 'Windows':
root_path = "/mnt/"+"".join(str(Path(__file__).parent).replace("\\","/").split(":"))
if not os.path.exists('C:\\Windows\\System32\\wsl.exe'):
if not show_yes_no_dialog("No WSL is detected on your system. Do you want me to install it for you? vLLM won't be abble to work without wsl."):
return False
subprocess.run(['wsl', '--install', 'Ubuntu'])
subprocess.run(['wsl', 'bash', '-c', 'mkdir ~/vllm'])
subprocess.run(['wsl', 'bash', '-c', 'cp {} ~/vllm'.format( root_path + '/install_vllm.sh')])
subprocess.run(['wsl', 'bash', '-c', 'cp {} ~/vllm'.format( root_path + '/run_vllm.sh')])
subprocess.run(['wsl', 'bash', '~/vllm/install_vllm.sh'])
else:
root_path = Path(__file__).parent
vllm_installer_path = root_path/'install_vllm.sh'
vllm_run_path = root_path/'run_vllm.sh'
vllm_path = Path.home()/"vllm"
vllm_path.mkdir(exist_ok=True, parents=True)
subprocess.run(['cp', str(vllm_installer_path), str(vllm_path)])
subprocess.run(['cp', str(vllm_run_path), str(vllm_path)])
subprocess.run(['bash', f'{vllm_path}/install_vllm.sh'])
root_dir = lollms_app.lollms_paths.personal_path
shared_folder = root_dir/"shared"
vllm_folder = shared_folder / "vllm"
vllm_folder.mkdir(exist_ok=True, parents=True)
return True
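# The Windows branch above maps this file's Windows path to its WSL mount
# point. A standalone sketch of that conversion (to_wsl_path is a hypothetical
# helper, not part of lollms):
def to_wsl_path(win_path: str) -> str:
    # "C:\\Users\\me\\vllm" -> "/mnt/c/Users/me/vllm" (WSL mounts drives lowercase)
    drive, rest = win_path.replace("\\", "/").split(":", 1)
    return f"/mnt/{drive.lower()}{rest}"

print(to_wsl_path("C:\\Users\\me\\vllm"))  # /mnt/c/Users/me/vllm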
def get_vllm(lollms_app:LollmsApplication):
if verify_vllm(lollms_app.lollms_paths):
ASCIIColors.success("lollms_vllm found.")
ASCIIColors.success("Loading source file...",end="")
# use importlib to load the module from the file path
ASCIIColors.success("ok")
return Service
else:
return None
class Service:
def __init__(
self,
app:LollmsApplication,
base_url="http://localhost:8000",
wait_max_retries = 5
):
self.base_url = base_url
# Get the current directory
lollms_paths = app.lollms_paths
self.app = app
root_dir = lollms_paths.personal_path
ASCIIColors.red(" __ __ __ __ __ __ ")
ASCIIColors.red(" / / ___ / / / / /\/\ / _\ __ __/ / / / /\/\ ")
ASCIIColors.red(" / / / _ \ / / / / / \ \ \ \ \ / / / / / / \ ")
ASCIIColors.red("/ /__| (_) / /___/ /___/ /\/\ \_\ \ \ V / /___/ /___/ /\/\ \ ")
ASCIIColors.red("\____/\___/\____/\____/\/ \/\__/___\_/\____/\____/\/ \/")
ASCIIColors.red(" |_____| ")
ASCIIColors.red(" Launching vllm service by vllm team")
ASCIIColors.red(" Integration in lollms by ParisNeo")
if not self.wait_for_service(1,False) and base_url is None:
ASCIIColors.info("Loading vllm service")
_, host, port = url2host_port(base_url)
# run vllm
if platform.system() == 'Windows':
subprocess.Popen(['wsl', 'bash', '~/vllm/run_vllm.sh', self.app.config.vllm_model_path, host, str(port), str(self.app.config.vllm_max_model_len), str(self.app.config.vllm_gpu_memory_utilization), str(self.app.config.vllm_max_num_seqs)])
else:
subprocess.Popen(['bash', f'{Path.home()}/vllm/run_vllm.sh', self.app.config.vllm_model_path, host, str(port), str(self.app.config.vllm_max_model_len), str(self.app.config.vllm_gpu_memory_utilization), str(self.app.config.vllm_max_num_seqs)])
# Wait until the service is available at base_url
self.wait_for_service(max_retries=wait_max_retries)
def wait_for_service(self, max_retries = 150, show_warning=True):
url = f"{self.base_url}" if "0.0.0.0" not in self.base_url else self.base_url.replace("0.0.0.0","http://localhost")
# Adjust this value as needed
retries = 0
while retries < max_retries or max_retries<0:
try:
response = requests.get(url)
if response.status_code == 200:
print("Service is available.")
if self.app is not None:
self.app.success("vLLM Service is now available.")
return True
except requests.exceptions.RequestException:
pass
retries += 1
time.sleep(1)
if show_warning:
print("Service did not become available within the given time.\nThis may be a normal behavior as it depends on your system performance. Maybe you should wait a little more before using the vllm client as it is not ready yet\n")
if self.app is not None:
self.app.error("vLLM Service did not become available within the given time.")
return False
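# Once wait_for_service returns True, the launched process serves vLLM's
# OpenAI-compatible API. A minimal completion request could look like this
# (model name and prompt are placeholders; the model must match vllm_model_path):
import requests

payload = {
    "model": "mistralai/Mistral-7B-Instruct-v0.2",  # placeholder served-model name
    "prompt": "Hello, ",
    "max_tokens": 32,
}
response = requests.post("http://localhost:8000/v1/completions", json=payload)
print(response.json()["choices"][0]["text"])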

View File

@@ -1,16 +0,0 @@
#!/bin/bash
PATH="$HOME/miniconda3/bin:$PATH"
export PATH
echo "Initializing conda"
$HOME/miniconda3/bin/conda init --all
echo "Initializing vllm with:"
echo "model :$1"
echo "host :$2"
echo "port :$3"
echo "max_model_len :$4"
echo "gpu_memory_utilization :$5"
source activate vllm && python -m vllm.entrypoints.openai.api_server --model "$1" --host "$2" --port "$3" --max-model-len "$4" --gpu-memory-utilization "$5" --max-num-seqs "$6"
# Wait for all background processes to finish
wait