diff --git a/lollms/configs/config.yaml b/lollms/configs/config.yaml
index 4ce9456..e0a699a 100644
--- a/lollms/configs/config.yaml
+++ b/lollms/configs/config.yaml
@@ -1,5 +1,5 @@
# =================== Lord Of Large Language Models Configuration file ===========================
-version: 46
+version: 48
binding_name: null
model_name: null
@@ -79,6 +79,16 @@ petals_base_url: http://0.0.0.0:8010
enable_lollms_service: false
lollms_base_url: http://0.0.0.0:1234
+# elastic search service
+elastic_search_service: false
+elastic_search_url: http://0.0.0.0:9606
+
+# vll service
+vllm_service: false
+vllm_url: http://0.0.0.0:8000
+vllm_model_path: mistralai/Mistral-7B-v0.1
+
+
# Audio
media_on: false
audio_in_language: 'en-US'
diff --git a/lollms/personality.py b/lollms/personality.py
index 72bf2a4..9f42174 100644
--- a/lollms/personality.py
+++ b/lollms/personality.py
@@ -50,12 +50,15 @@ def get_element_id(url, text):
else:
return None
-def craft_a_tag_to_specific_text(url, text):
- element_id = get_element_id(url, text)
- if element_id:
- return f'Click me to go to {text}'
- else:
- return None
+def craft_a_tag_to_specific_text(url, text, caption):
+ # Encode the text to be used in the URL
+ encoded_text = text.replace(' ', '%20')
+
+ # Construct the URL with the anchor tag
+ anchor_url = f"{url}#{encoded_text}"
+
+ # Return the anchor tag
+    return f'<a href="{anchor_url}">{caption}</a>'
def is_package_installed(package_name):
try:
diff --git a/lollms/services/sd/lollms_sd.py b/lollms/services/sd/lollms_sd.py
index e568615..3fc7465 100644
--- a/lollms/services/sd/lollms_sd.py
+++ b/lollms/services/sd/lollms_sd.py
@@ -39,10 +39,15 @@ def verify_sd(lollms_paths:LollmsPaths):
sd_folder = shared_folder / "auto_sd"
return sd_folder.exists()
-def install_sd(lollms_paths:LollmsPaths):
- root_dir = lollms_paths.personal_path
+def install_sd(lollms_app:LollmsApplication):
+ root_dir = lollms_app.lollms_paths.personal_path
shared_folder = root_dir/"shared"
sd_folder = shared_folder / "auto_sd"
+ if sd_folder.exists():
+ if not lollms_app.YesNoMessage("I have detected that there is a previous installation of stable diffusion.\nShould I remove it and continue installing?"):
+ return
+ else:
+            import shutil; shutil.rmtree(sd_folder)  # Path.unlink cannot remove a directory
subprocess.run(["git", "clone", "https://github.com/ParisNeo/stable-diffusion-webui.git", str(sd_folder)])
subprocess.run(["git", "clone", "https://github.com/ParisNeo/SD-CN-Animation.git", str(sd_folder/"extensions/SD-CN-Animation")])
diff --git a/lollms/services/vllm/.gitignore b/lollms/services/vllm/.gitignore
new file mode 100644
index 0000000..ad347f9
--- /dev/null
+++ b/lollms/services/vllm/.gitignore
@@ -0,0 +1 @@
+models.txt
diff --git a/lollms/services/vllm/install_vllm.sh b/lollms/services/vllm/install_vllm.sh
new file mode 100644
index 0000000..8228405
--- /dev/null
+++ b/lollms/services/vllm/install_vllm.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+conda create -n myenv python=3.9 -y && conda activate myenv && pip install vllm --user
\ No newline at end of file
diff --git a/lollms/services/vllm/lollms_vllm.py b/lollms/services/vllm/lollms_vllm.py
new file mode 100644
index 0000000..c7d3941
--- /dev/null
+++ b/lollms/services/vllm/lollms_vllm.py
@@ -0,0 +1,119 @@
+# Title vLLM service
+# Licence: MIT
+# Author : Paris Neo
+# This is a service launcher for the vLLM inference server by the vLLM team
+# check it out : https://github.com/vllm-project/vllm
+# Here is a copy of the LICENCE https://github.com/vllm-project/vllm/blob/main/LICENSE
+# All rights are reserved
+
+from pathlib import Path
+import sys
+from lollms.app import LollmsApplication
+from lollms.paths import LollmsPaths
+from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
+import time
+import io
+import sys
+import requests
+import os
+import base64
+import subprocess
+import time
+import json
+import platform
+from dataclasses import dataclass
+from PIL import Image, PngImagePlugin
+from enum import Enum
+from typing import List, Dict, Any
+
+from ascii_colors import ASCIIColors, trace_exception
+from lollms.paths import LollmsPaths
+from lollms.utilities import git_pull
+import subprocess
+import platform
+
+
+def verify_vllm(lollms_paths:LollmsPaths):
+ # Clone repository
+
+ root_dir = lollms_paths.personal_path
+ shared_folder = root_dir/"shared"
+    vllm_folder = shared_folder / "vllm"
+    return vllm_folder.exists()
+
+
+def install_vllm(lollms_app:LollmsApplication):
+ if platform.system() == 'Windows':
+ root_path = "/mnt/"+"".join(str(Path(__file__).parent).replace("\\","/").split(":"))
+ if not os.path.exists('C:\\Windows\\System32\\wsl.exe'):
+            if not lollms_app.YesNoMessage("No WSL is detected on your system. Do you want me to install it for you? vLLM won't be able to work without wsl."):
+ return False
+ subprocess.run(['wsl', '--install', 'Ubuntu'])
+ subprocess.run(['wsl', 'bash', '-c', 'cp {} ~'.format( root_path + '/install_vllm.sh')])
+ subprocess.run(['wsl', 'bash', '-c', 'cp {} ~'.format( root_path + '/run_vllm.sh')])
+ subprocess.run(['wsl', 'bash', '~/install_vllm.sh'])
+ else:
+ root_path = str(Path(__file__).parent)
+ home = Path.home()
+        subprocess.run(['cp', root_path + '/install_vllm.sh', str(home)])
+        subprocess.run(['cp', root_path + '/run_vllm.sh', str(home)])
+ subprocess.run(['bash', f'{home}/install_vllm.sh'])
+ return True
+class Service:
+ def __init__(
+ self,
+ app:LollmsApplication,
+            base_url="http://127.0.0.1:8000",
+ wait_max_retries = 5
+ ):
+ self.base_url = base_url
+ # Get the current directory
+ lollms_paths = app.lollms_paths
+ self.app = app
+ root_dir = lollms_paths.personal_path
+
+ ASCIIColors.red(" __ __ __ __ __ __ ")
+ ASCIIColors.red(" / / ___ / / / / /\/\ / _\ __ __/ / / / /\/\ ")
+ ASCIIColors.red(" / / / _ \ / / / / / \ \ \ \ \ / / / / / / \ ")
+ ASCIIColors.red("/ /__| (_) / /___/ /___/ /\/\ \_\ \ \ V / /___/ /___/ /\/\ \ ")
+ ASCIIColors.red("\____/\___/\____/\____/\/ \/\__/___\_/\____/\____/\/ \/")
+ ASCIIColors.red(" |_____| ")
+
+ ASCIIColors.red(" Launching vllm service by vllm team")
+ ASCIIColors.red(" Integration in lollms by ParisNeo")
+
+ if not self.wait_for_service(1,False) and base_url is None:
+ ASCIIColors.info("Loading vllm service")
+
+ # run vllm
+ if platform.system() == 'Windows':
+                subprocess.Popen(['wsl', 'bash', '~/run_vllm.sh'])
+ else:
+ subprocess.Popen(['bash', f'{Path.home()}/run_vllm.sh'])
+
+ # Wait until the service is available at http://127.0.0.1:7860/
+ self.wait_for_service(max_retries=wait_max_retries)
+
+ def wait_for_service(self, max_retries = 150, show_warning=True):
+ url = f"{self.base_url}"
+ # Adjust this value as needed
+ retries = 0
+
+ while retries < max_retries or max_retries<0:
+ try:
+ response = requests.get(url)
+ if response.status_code == 200:
+ print("Service is available.")
+ if self.app is not None:
+ self.app.success("vLLM Service is now available.")
+ return True
+ except requests.exceptions.RequestException:
+ pass
+
+ retries += 1
+ time.sleep(1)
+ if show_warning:
+ print("Service did not become available within the given time.\nThis may be a normal behavior as it depends on your system performance. Maybe you should wait a little more before using the vllm client as it is not ready yet\n")
+ if self.app is not None:
+ self.app.error("vLLM Service did not become available within the given time.")
+ return False
diff --git a/lollms/services/vllm/run_vllm.sh b/lollms/services/vllm/run_vllm.sh
new file mode 100644
index 0000000..e61578f
--- /dev/null
+++ b/lollms/services/vllm/run_vllm.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+cd ~/vllm
+
+python -m vllm.entrypoints.openai.api_server --model "$1"
+
+# Wait for all background processes to finish
+wait
\ No newline at end of file
diff --git a/lollms/services/xtts/lollms_xtts.py b/lollms/services/xtts/lollms_xtts.py
index 26d3ddb..0e6235f 100644
--- a/lollms/services/xtts/lollms_xtts.py
+++ b/lollms/services/xtts/lollms_xtts.py
@@ -31,7 +31,7 @@ from ascii_colors import ASCIIColors, trace_exception
from lollms.paths import LollmsPaths
from lollms.utilities import git_pull
import subprocess
-
+import platform
def verify_xtts(lollms_paths:LollmsPaths):
# Clone repository
@@ -48,8 +48,14 @@ def install_xtts(lollms_app:LollmsApplication):
if not lollms_app.YesNoMessage("It looks like xtts is already installed on your system.\nDo you want to reinstall it?"):
lollms_app.error("Service installation canceled")
return
-
- PackageManager.install_package("xtts-api-server")
+
+
+ if platform.system() == 'Windows':
+ os.system(f'{Path(__file__).parent}/xtts_installer.bat')
+ elif platform.system() == 'Linux' or platform.system() == 'Darwin':
+ os.system(f'{Path(__file__).parent}/xtts_installer.sh')
+ else:
+ print("Unsupported operating system.")
xtts_folder.mkdir(exist_ok=True,parents=True)
ASCIIColors.green("XTTS server installed successfully")
@@ -107,16 +113,26 @@ class LollmsXTTS:
ASCIIColors.red("\____/\___/\____/\____/\/ \/\__/ /_/\_\\__|\__|___/ ")
ASCIIColors.red(" Forked from daswer123's XTTS server")
- ASCIIColors.red(" Integration in lollms by ParisNeo using daswer123's webapi ")
+ ASCIIColors.red(" Integration in lollms by ParisNeo using daswer123's webapi")
+ self.output_folder = app.lollms_paths.personal_outputs_path/"audio_out"
+ self.output_folder.mkdir(parents=True, exist_ok=True)
if not self.wait_for_service(1,False) and xtts_base_url is None:
ASCIIColors.info("Loading lollms_xtts")
os.environ['xtts_WEBUI_RESTARTING'] = '1' # To forbid sd webui from showing on the browser automatically
# Launch the Flask service using the appropriate script for the platform
- ASCIIColors.info("Running on windows")
- self.output_folder = app.lollms_paths.personal_outputs_path/"audio_out"
- self.output_folder.mkdir(parents=True, exist_ok=True)
- subprocess.Popen(["python", "-m", "xtts_api_server", "-o", f"{self.output_folder}", "-sf", f"{self.voice_samples_path}"])
+
+ if platform.system() == 'Windows':
+ ASCIIColors.info("Running on windows")
+ subprocess.Popen(f'{Path(__file__).parent}/xtts_run.bat {self.output_folder} {self.voice_samples_path}', cwd=Path(__file__).parent)
+                # (removed stray no-argument os.system() call, which would raise TypeError)
+ elif platform.system() == 'Linux' or platform.system() == 'Darwin':
+ ASCIIColors.info("Running on Linux/macos")
+ subprocess.Popen(f'{Path(__file__).parent}/xtts_run.sh {self.output_folder} {self.voice_samples_path}', cwd=Path(__file__).parent)
+ else:
+ print("Unsupported operating system.")
+
+ # subprocess.Popen(["python", "-m", "xtts_api_server", "-o", f"{self.output_folder}", "-sf", f"{self.voice_samples_path}"])
# Wait until the service is available at http://127.0.0.1:7860/
self.wait_for_service(max_retries=max_retries)
diff --git a/lollms/services/xtts/xtts_installer.bat b/lollms/services/xtts/xtts_installer.bat
new file mode 100644
index 0000000..b3768e4
--- /dev/null
+++ b/lollms/services/xtts/xtts_installer.bat
@@ -0,0 +1,2 @@
+@echo off
+conda deactivate && conda create --name xtts --yes && conda activate xtts && pip install xtts-api-server --user
\ No newline at end of file
diff --git a/lollms/services/xtts/xtts_installer.sh b/lollms/services/xtts/xtts_installer.sh
new file mode 100644
index 0000000..995e9fa
--- /dev/null
+++ b/lollms/services/xtts/xtts_installer.sh
@@ -0,0 +1,3 @@
+conda create --name xtts
+conda activate xtts
+pip install xtts-api-server
\ No newline at end of file
diff --git a/lollms/services/xtts/xtts_run.bat b/lollms/services/xtts/xtts_run.bat
new file mode 100644
index 0000000..58d0cad
--- /dev/null
+++ b/lollms/services/xtts/xtts_run.bat
@@ -0,0 +1,2 @@
+@echo off
+conda activate xtts && python -m xtts_api_server -o %1 -sf %2
diff --git a/lollms/services/xtts/xtts_run.sh b/lollms/services/xtts/xtts_run.sh
new file mode 100644
index 0000000..699287f
--- /dev/null
+++ b/lollms/services/xtts/xtts_run.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+conda activate xtts
+python -m xtts_api_server -o "$1" -sf "$2"