mirror of
https://github.com/ParisNeo/lollms.git
synced 2025-04-08 11:24:14 +00:00
added services
This commit is contained in:
parent
a1ffac42b6
commit
2683c23fb9
@ -1,5 +1,5 @@
|
||||
# =================== Lord Of Large Language Models Configuration file ===========================
|
||||
version: 46
|
||||
version: 48
|
||||
binding_name: null
|
||||
model_name: null
|
||||
|
||||
@ -79,6 +79,16 @@ petals_base_url: http://0.0.0.0:8010
|
||||
enable_lollms_service: false
|
||||
lollms_base_url: http://0.0.0.0:1234
|
||||
|
||||
# elastic search service
|
||||
elastic_search_service: false
|
||||
elastic_search_url: http://0.0.0.0:9606
|
||||
|
||||
# vllm service
|
||||
vllm_service: false
|
||||
vllm_url: http://0.0.0.0:8000
|
||||
vllm_model_path: mistralai/Mistral-7B-v0.1
|
||||
|
||||
|
||||
# Audio
|
||||
media_on: false
|
||||
audio_in_language: 'en-US'
|
||||
|
@ -50,12 +50,15 @@ def get_element_id(url, text):
|
||||
else:
|
||||
return None
|
||||
|
||||
def craft_a_tag_to_specific_text(url, text):
|
||||
element_id = get_element_id(url, text)
|
||||
if element_id:
|
||||
return f'<a href="{url}#{element_id}">Click me to go to {text}</a>'
|
||||
else:
|
||||
return None
|
||||
def craft_a_tag_to_specific_text(url, text, caption):
    """Build an HTML anchor that links to *text* on the page at *url*.

    The text is percent-encoded and appended to the URL as a fragment;
    *caption* is the visible link text. The link opens in a new tab.
    """
    from urllib.parse import quote

    # Fix: percent-encode the whole fragment. The original only replaced
    # spaces with %20, so characters like '&', '?' or '#' in the text would
    # corrupt the URL.
    encoded_text = quote(text)

    # Construct the URL with the fragment pointing at the text
    anchor_url = f"{url}#{encoded_text}"

    # Fix: the original wrote target="blanc"; the HTML value that opens the
    # link in a new tab is "_blank".
    return f'<a href="{anchor_url}" target="_blank">{caption}</a>'
|
||||
|
||||
def is_package_installed(package_name):
|
||||
try:
|
||||
|
@ -39,10 +39,15 @@ def verify_sd(lollms_paths:LollmsPaths):
|
||||
sd_folder = shared_folder / "auto_sd"
|
||||
return sd_folder.exists()
|
||||
|
||||
def install_sd(lollms_paths:LollmsPaths):
|
||||
root_dir = lollms_paths.personal_path
|
||||
def install_sd(lollms_app:LollmsApplication):
    """Install (or reinstall) the stable-diffusion webui service.

    Clones the webui repository and the SD-CN-Animation extension into
    <personal_path>/shared/auto_sd. If a previous installation is found,
    the user is asked whether to remove it and continue.
    """
    import shutil  # local import: needed to delete a whole directory tree

    root_dir = lollms_app.lollms_paths.personal_path
    shared_folder = root_dir/"shared"
    sd_folder = shared_folder / "auto_sd"
    if sd_folder.exists():
        if not lollms_app.YesNoMessage("I have detected that there is a previous installation of stable diffusion.\nShould I remove it and continue installing?"):
            return
        else:
            # Fix: Path.unlink() cannot remove a directory (it raises
            # IsADirectoryError); remove the installation tree instead.
            shutil.rmtree(sd_folder, ignore_errors=True)
    subprocess.run(["git", "clone", "https://github.com/ParisNeo/stable-diffusion-webui.git", str(sd_folder)])
    subprocess.run(["git", "clone", "https://github.com/ParisNeo/SD-CN-Animation.git", str(sd_folder/"extensions/SD-CN-Animation")])
|
||||
|
||||
|
1
lollms/services/vllm/.gitignore
vendored
Normal file
1
lollms/services/vllm/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
models.txt
|
2
lollms/services/vllm/install_vllm.sh
Normal file
2
lollms/services/vllm/install_vllm.sh
Normal file
@ -0,0 +1,2 @@
|
||||
#!/bin/sh
# Create a dedicated conda environment and install vLLM into it.
# NOTE(review): `conda activate` normally fails in a non-interactive sh script
# unless conda's shell hook has been sourced first — confirm this runs in a
# conda-initialized shell. The env name "myenv" is generic, and run_vllm.sh
# does not activate any environment — verify the two scripts agree.
conda create -n myenv python=3.9 -y && conda activate myenv && pip install vllm --user
|
119
lollms/services/vllm/lollms_vllm.py
Normal file
119
lollms/services/vllm/lollms_vllm.py
Normal file
@ -0,0 +1,119 @@
|
||||
# Title vLLM service
|
||||
# Licence: MIT
|
||||
# Author : Paris Neo
|
||||
# This is a service launcher for the vLLM inference server by the vLLM team (vllm-project)
# check it out : https://github.com/vllm-project/vllm
# Here is a copy of the LICENCE https://github.com/vllm-project/vllm/blob/main/LICENSE
|
||||
# All rights are reserved
|
||||
|
||||
from pathlib import Path
|
||||
import sys
|
||||
from lollms.app import LollmsApplication
|
||||
from lollms.paths import LollmsPaths
|
||||
from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
|
||||
import time
|
||||
import io
|
||||
import sys
|
||||
import requests
|
||||
import os
|
||||
import base64
|
||||
import subprocess
|
||||
import time
|
||||
import json
|
||||
import platform
|
||||
from dataclasses import dataclass
|
||||
from PIL import Image, PngImagePlugin
|
||||
from enum import Enum
|
||||
from typing import List, Dict, Any
|
||||
|
||||
from ascii_colors import ASCIIColors, trace_exception
|
||||
from lollms.paths import LollmsPaths
|
||||
from lollms.utilities import git_pull
|
||||
import subprocess
|
||||
import platform
|
||||
|
||||
|
||||
def verify_vllm(lollms_paths:LollmsPaths):
    """Report whether the service's marker folder exists under the personal path.

    NOTE(review): this tests the "auto_sd" folder — the stable-diffusion
    service's folder — which looks copy-pasted from verify_sd; confirm the
    intended marker for the vLLM service (install_vllm never creates it).
    """
    marker_folder = lollms_paths.personal_path / "shared" / "auto_sd"
    return marker_folder.exists()
|
||||
|
||||
|
||||
def install_vllm(lollms_app:LollmsApplication):
    """Install the vLLM launcher scripts for the current platform.

    On Windows the scripts are copied into the WSL home directory (offering to
    install WSL first if it is missing); on other platforms they are copied to
    the user's home and the installer is run with bash.

    Returns False when the user declines the WSL installation, True otherwise.
    """
    if platform.system() == 'Windows':
        # Path of this folder as seen from inside WSL (/mnt/<drive>/...).
        root_path = "/mnt/"+"".join(str(Path(__file__).parent).replace("\\","/").split(":"))
        if not os.path.exists('C:\\Windows\\System32\\wsl.exe'):
            if not lollms_app.YesNoMessage("No WSL is detected on your system. Do you want me to install it for you? vLLM won't be abble to work without wsl."):
                return False
            subprocess.run(['wsl', '--install', 'Ubuntu'])
        subprocess.run(['wsl', 'bash', '-c', 'cp {} ~'.format( root_path + '/install_vllm.sh')])
        subprocess.run(['wsl', 'bash', '-c', 'cp {} ~'.format( root_path + '/run_vllm.sh')])
        subprocess.run(['wsl', 'bash', '~/install_vllm.sh'])
    else:
        root_path = str(Path(__file__).parent)
        home = Path.home()
        # Fix: the original passed the whole "cp src dst" string as a single
        # argv element, making subprocess look for an executable literally
        # named "cp src dst" (FileNotFoundError). Use a real argument vector.
        subprocess.run(['cp', root_path + '/install_vllm.sh', str(home)])
        subprocess.run(['cp', root_path + '/run_vllm.sh', str(home)])
        subprocess.run(['bash', f'{home}/install_vllm.sh'])
    return True
|
||||
class Service:
    """Launcher/monitor for a local vLLM server process.

    NOTE(review): the default port 11434 is ollama's default port, and the
    launch scripts are copied by install_vllm — this class looks adapted from
    the ollama service launcher. Confirm the intended default endpoint
    (vLLM's OpenAI-compatible server defaults to port 8000).
    """
    def __init__(
                    self,
                    app:LollmsApplication,
                    base_url="http://127.0.0.1:11434",
                    wait_max_retries = 5
                ):
        # URL the service is expected to answer on; polled by wait_for_service.
        self.base_url = base_url
        # Get the current directory
        lollms_paths = app.lollms_paths
        self.app = app
        root_dir = lollms_paths.personal_path

        ASCIIColors.red(" __ __ __ __ __ __ ")
        ASCIIColors.red(" / / ___ / / / / /\/\ / _\ __ __/ / / / /\/\ ")
        ASCIIColors.red(" / / / _ \ / / / / / \ \ \ \ \ / / / / / / \ ")
        ASCIIColors.red("/ /__| (_) / /___/ /___/ /\/\ \_\ \ \ V / /___/ /___/ /\/\ \ ")
        ASCIIColors.red("\____/\___/\____/\____/\/ \/\__/___\_/\____/\____/\/ \/")
        ASCIIColors.red(" |_____| ")

        ASCIIColors.red(" Launching vllm service by vllm team")
        ASCIIColors.red(" Integration in lollms by ParisNeo")

        # NOTE(review): `base_url` has a non-None default, so `base_url is None`
        # is only True when the caller explicitly passes None — and then
        # self.base_url is None and wait_for_service() polls the literal URL
        # "None". Confirm the intended auto-launch condition.
        if not self.wait_for_service(1,False) and base_url is None:
            ASCIIColors.info("Loading vllm service")

            # run vllm
            if platform.system() == 'Windows':
                subprocess.Popen(['wsl', 'bash', '~/run_vllm.sh '])
            else:
                subprocess.Popen(['bash', f'{Path.home()}/run_vllm.sh'])

        # Wait until the service is available at http://127.0.0.1:7860/
        self.wait_for_service(max_retries=wait_max_retries)

    def wait_for_service(self, max_retries = 150, show_warning=True):
        """Poll self.base_url once per second until it answers HTTP 200.

        max_retries: number of one-second attempts; a negative value polls
                     forever.
        show_warning: when True, print/report a message if the service never
                      became available.
        Returns True when the service answered 200, False otherwise.
        """
        url = f"{self.base_url}"
        # Adjust this value as needed
        retries = 0

        while retries < max_retries or max_retries<0:
            try:
                response = requests.get(url)
                if response.status_code == 200:
                    print("Service is available.")
                    if self.app is not None:
                        self.app.success("vLLM Service is now available.")
                    return True
            except requests.exceptions.RequestException:
                # Service not reachable yet (connection refused, ...) — retry.
                pass

            retries += 1
            time.sleep(1)
        if show_warning:
            print("Service did not become available within the given time.\nThis may be a normal behavior as it depends on your system performance. Maybe you should wait a little more before using the vllm client as it is not ready yet\n")
            if self.app is not None:
                self.app.error("vLLM Service did not become available within the given time.")
        return False
|
8
lollms/services/vllm/run_vllm.sh
Normal file
8
lollms/services/vllm/run_vllm.sh
Normal file
@ -0,0 +1,8 @@
|
||||
#!/bin/bash
# Launch the vLLM OpenAI-compatible API server.
# $1 = model name or path to serve.
# NOTE(review): install_vllm.sh pip-installs vllm and never creates ~/vllm —
# confirm this directory exists, otherwise the cd fails.

cd ~/vllm

# Fix: the original used Windows batch argument syntax (%1); in bash the
# first positional parameter is "$1".
python -m vllm.entrypoints.openai.api_server --model "$1"

# Wait for all background processes to finish
wait
|
@ -31,7 +31,7 @@ from ascii_colors import ASCIIColors, trace_exception
|
||||
from lollms.paths import LollmsPaths
|
||||
from lollms.utilities import git_pull
|
||||
import subprocess
|
||||
|
||||
import platform
|
||||
|
||||
def verify_xtts(lollms_paths:LollmsPaths):
|
||||
# Clone repository
|
||||
@ -48,8 +48,14 @@ def install_xtts(lollms_app:LollmsApplication):
|
||||
if not lollms_app.YesNoMessage("It looks like xtts is already installed on your system.\nDo you want to reinstall it?"):
|
||||
lollms_app.error("Service installation canceled")
|
||||
return
|
||||
|
||||
PackageManager.install_package("xtts-api-server")
|
||||
|
||||
|
||||
if platform.system() == 'Windows':
|
||||
os.system(f'{Path(__file__).parent}/xtts_installer.bat')
|
||||
elif platform.system() == 'Linux' or platform.system() == 'Darwin':
|
||||
os.system(f'{Path(__file__).parent}/xtts_installer.sh')
|
||||
else:
|
||||
print("Unsupported operating system.")
|
||||
|
||||
xtts_folder.mkdir(exist_ok=True,parents=True)
|
||||
ASCIIColors.green("XTTS server installed successfully")
|
||||
@ -107,16 +113,26 @@ class LollmsXTTS:
|
||||
ASCIIColors.red("\____/\___/\____/\____/\/ \/\__/ /_/\_\\__|\__|___/ ")
|
||||
|
||||
ASCIIColors.red(" Forked from daswer123's XTTS server")
|
||||
ASCIIColors.red(" Integration in lollms by ParisNeo using daswer123's webapi ")
|
||||
ASCIIColors.red(" Integration in lollms by ParisNeo using daswer123's webapi")
|
||||
self.output_folder = app.lollms_paths.personal_outputs_path/"audio_out"
|
||||
self.output_folder.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if not self.wait_for_service(1,False) and xtts_base_url is None:
|
||||
ASCIIColors.info("Loading lollms_xtts")
|
||||
os.environ['xtts_WEBUI_RESTARTING'] = '1' # To forbid sd webui from showing on the browser automatically
|
||||
# Launch the Flask service using the appropriate script for the platform
|
||||
ASCIIColors.info("Running on windows")
|
||||
self.output_folder = app.lollms_paths.personal_outputs_path/"audio_out"
|
||||
self.output_folder.mkdir(parents=True, exist_ok=True)
|
||||
subprocess.Popen(["python", "-m", "xtts_api_server", "-o", f"{self.output_folder}", "-sf", f"{self.voice_samples_path}"])
|
||||
|
||||
if platform.system() == 'Windows':
|
||||
ASCIIColors.info("Running on windows")
|
||||
subprocess.Popen(f'{Path(__file__).parent}/xtts_run.bat {self.output_folder} {self.voice_samples_path}', cwd=Path(__file__).parent)
|
||||
os.system()
|
||||
elif platform.system() == 'Linux' or platform.system() == 'Darwin':
|
||||
ASCIIColors.info("Running on Linux/macos")
|
||||
subprocess.Popen(f'{Path(__file__).parent}/xtts_run.sh {self.output_folder} {self.voice_samples_path}', cwd=Path(__file__).parent)
|
||||
else:
|
||||
print("Unsupported operating system.")
|
||||
|
||||
# subprocess.Popen(["python", "-m", "xtts_api_server", "-o", f"{self.output_folder}", "-sf", f"{self.voice_samples_path}"])
|
||||
|
||||
# Wait until the service is available at http://127.0.0.1:7860/
|
||||
self.wait_for_service(max_retries=max_retries)
|
||||
|
2
lollms/services/xtts/xtts_installer.bat
Normal file
2
lollms/services/xtts/xtts_installer.bat
Normal file
@ -0,0 +1,2 @@
|
||||
@echo off
REM Create an isolated "xtts" conda environment and install the XTTS API
REM server into it. Deactivates any currently active environment first so
REM the install starts from a clean base.
conda deactivate && conda create --name xtts --yes && conda activate xtts && pip install xtts-api-server --user
|
3
lollms/services/xtts/xtts_installer.sh
Normal file
3
lollms/services/xtts/xtts_installer.sh
Normal file
@ -0,0 +1,3 @@
|
||||
#!/bin/bash
# Create the "xtts" conda environment and install the XTTS API server.
# Fixes: added a shebang, and --yes so `conda create` does not hang waiting
# for interactive confirmation (matches xtts_installer.bat).
# NOTE(review): `conda activate` requires conda's shell hook to be sourced —
# confirm this script runs in a conda-initialized shell.
conda create --name xtts --yes
conda activate xtts
pip install xtts-api-server
|
2
lollms/services/xtts/xtts_run.bat
Normal file
2
lollms/services/xtts/xtts_run.bat
Normal file
@ -0,0 +1,2 @@
|
||||
@echo off
REM Launch the XTTS API server inside the "xtts" conda environment.
REM %1 = output folder for generated audio, %2 = voice samples folder.
conda activate xtts && python -m xtts_api_server -o %1 -sf %2
|
3
lollms/services/xtts/xtts_run.sh
Normal file
3
lollms/services/xtts/xtts_run.sh
Normal file
@ -0,0 +1,3 @@
|
||||
#!/bin/bash
# Launch the XTTS API server inside the "xtts" conda environment.
# $1 = output folder for generated audio, $2 = voice samples folder.
# Fixes: removed the duplicated `conda activate xtts` line and replaced the
# Windows batch argument syntax (%1, %2) with bash positional parameters.
conda activate xtts
python -m xtts_api_server -o "$1" -sf "$2"
|
Loading…
x
Reference in New Issue
Block a user