From 04500070eba0729a40c45b17e42a6c6089b29549 Mon Sep 17 00:00:00 2001 From: Saifeddine ALOUI Date: Sun, 15 Sep 2024 01:37:00 +0200 Subject: [PATCH] upgraded lollms --- environment.yaml | 42 +++++++++++++++ lollms/server/endpoints/lollms_tts.py | 2 +- lollms/server/endpoints/lollms_whisper.py | 52 +++++++++++++++++++ lollms/server/endpoints/lollms_xtts.py | 52 +++++++++++++++++++ lollms/services/stt/whisper/lollms_whisper.py | 27 +++++++--- lollms/services/tts/xtts/lollms_xtts.py | 4 ++ .../components_test/whisper_test.py | 21 ++++++++ 7 files changed, 192 insertions(+), 8 deletions(-) create mode 100644 environment.yaml create mode 100644 lollms/server/endpoints/lollms_whisper.py create mode 100644 lollms/server/endpoints/lollms_xtts.py create mode 100644 tests/endoints_unit_tests/components_test/whisper_test.py diff --git a/environment.yaml b/environment.yaml new file mode 100644 index 0000000..2c28d1d --- /dev/null +++ b/environment.yaml @@ -0,0 +1,42 @@ +name: lollms_env +channels: + - defaults + - conda-forge # Adds a wider selection of packages, especially for less common ones +dependencies: + - python=3.11 + - numpy=1.26.* + - pandas + - pillow>=9.5.0 + - pyyaml + - requests + - rich + - scipy + - tqdm + - setuptools + - wheel + - psutil + - pytest + - gitpython + - beautifulsoup4 + - packaging + - fastapi + - uvicorn + - pydantic + - selenium + - aiofiles + - pip # Conda will manage pip installation + - pip: + - colorama + - ascii-colors>=0.4.2 + - python-multipart + - python-socketio + - python-socketio[client] + - python-socketio[asyncio_client] + - tiktoken + - pipmaster>=0.1.7 + - lollmsvectordb>=1.1.0 + - freedom-search>=0.1.9 + - scrapemaster>=0.2.0 + - lollms_client>=0.7.5 + - zipfile36 + - freedom_search diff --git a/lollms/server/endpoints/lollms_tts.py b/lollms/server/endpoints/lollms_tts.py index 76bad11..d1db67c 100644 --- a/lollms/server/endpoints/lollms_tts.py +++ b/lollms/server/endpoints/lollms_tts.py @@ -244,7 +244,7 @@ async def 
upload_voice_file(file: UploadFile = File(...)): # Save the file to disk or process it further contents = await file.read() - safe_filename = f"voice_{file_path.name}" + safe_filename = f"{file_path.name}" safe_file_path = lollmsElfServer.lollms_paths.custom_voices_path/safe_filename with safe_file_path.open("wb") as f: f.write(contents) diff --git a/lollms/server/endpoints/lollms_whisper.py b/lollms/server/endpoints/lollms_whisper.py new file mode 100644 index 0000000..fbfe2d3 --- /dev/null +++ b/lollms/server/endpoints/lollms_whisper.py @@ -0,0 +1,52 @@ +""" +project: lollms_webui +file: lollms_whisper.py +author: ParisNeo +description: + This module contains a set of FastAPI routes that provide information about the Lord of Large Language and Multimodal Systems (LoLLMs) Web UI + application. These routes allow users to install the Whisper speech-to-text service. + +""" +from fastapi import APIRouter, Request, UploadFile, File, HTTPException +from fastapi.responses import PlainTextResponse +from lollms_webui import LOLLMSWebUI +from pydantic import BaseModel +from starlette.responses import StreamingResponse +from lollms.types import MSG_OPERATION_TYPE +from lollms.main_config import BaseConfig +from lollms.utilities import find_next_available_filename, output_file_path_to_url, detect_antiprompt, remove_text_from_string, trace_exception, find_first_available_file_index, add_period, PackageManager +from lollms.security import sanitize_path, validate_path, check_access +from pathlib import Path +from ascii_colors import ASCIIColors +import os +import platform + +# ----------------------- Defining router and main class ------------------------------ +
+router = APIRouter() +lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance() + +class Identification(BaseModel): + client_id: str + +# ----------------------- voice ------------------------------ +@router.post("/install_whisper") +def install_whisper(data: Identification): + check_access(lollmsElfServer, data.client_id) + try: + if
lollmsElfServer.config.headless_server_mode: + return {"status":False,"error":"Service installation is blocked when in headless mode for obvious security reasons!"} + + if lollmsElfServer.config.host!="localhost" and lollmsElfServer.config.host!="127.0.0.1": + return {"status":False,"error":"Service installation is blocked when the server is exposed outside for very obvious reasons!"} + + lollmsElfServer.ShowBlockingMessage("Installing whisper library\nPlease stand by") + from lollms.services.stt.whisper.lollms_whisper import install_whisper + install_whisper(lollmsElfServer) + ASCIIColors.success("Done") + lollmsElfServer.HideBlockingMessage() + return {"status":True} + except Exception as ex: + lollmsElfServer.HideBlockingMessage() + lollmsElfServer.InfoMessage(f"It looks like I could not install whisper because of this error:\n{ex}") + return {"status":False, 'error':str(ex)} diff --git a/lollms/server/endpoints/lollms_xtts.py b/lollms/server/endpoints/lollms_xtts.py new file mode 100644 index 0000000..0fdae31 --- /dev/null +++ b/lollms/server/endpoints/lollms_xtts.py @@ -0,0 +1,52 @@ +""" +project: lollms_webui +file: lollms_xtts.py +author: ParisNeo +description: + This module contains a set of FastAPI routes that provide information about the Lord of Large Language and Multimodal Systems (LoLLMs) Web UI + application. 
These routes allow users to install the XTTS text-to-speech service. + +""" +from fastapi import APIRouter, Request, UploadFile, File, HTTPException +from fastapi.responses import PlainTextResponse +from lollms_webui import LOLLMSWebUI +from pydantic import BaseModel +from starlette.responses import StreamingResponse +from lollms.types import MSG_OPERATION_TYPE +from lollms.main_config import BaseConfig +from lollms.utilities import find_next_available_filename, output_file_path_to_url, detect_antiprompt, remove_text_from_string, trace_exception, find_first_available_file_index, add_period, PackageManager +from lollms.security import sanitize_path, validate_path, check_access +from pathlib import Path +from ascii_colors import ASCIIColors +import os +import platform + +# ----------------------- Defining router and main class ------------------------------ + +router = APIRouter() +lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance() + +class Identification(BaseModel): + client_id: str + +# ----------------------- voice ------------------------------ +@router.post("/install_xtts") +def install_xtts(data: Identification): + check_access(lollmsElfServer, data.client_id) + try: + if lollmsElfServer.config.headless_server_mode: + return {"status":False,"error":"Service installation is blocked when in headless mode for obvious security reasons!"} + + if lollmsElfServer.config.host!="localhost" and lollmsElfServer.config.host!="127.0.0.1": + return {"status":False,"error":"Service installation is blocked when the server is exposed outside for very obvious reasons!"} + + lollmsElfServer.ShowBlockingMessage("Installing XTTS library\nPlease stand by") + from lollms.services.tts.xtts.lollms_xtts import xtts_install + xtts_install(lollmsElfServer) + ASCIIColors.success("Done") + lollmsElfServer.HideBlockingMessage() + return {"status":True} + except Exception as ex: + lollmsElfServer.HideBlockingMessage() + lollmsElfServer.InfoMessage(f"It looks like I could not install XTTS because of this error:\n{ex}") + return
{"status":False, 'error':str(ex)} diff --git a/lollms/services/stt/whisper/lollms_whisper.py b/lollms/services/stt/whisper/lollms_whisper.py index bc0e4d6..62d92d7 100644 --- a/lollms/services/stt/whisper/lollms_whisper.py +++ b/lollms/services/stt/whisper/lollms_whisper.py @@ -17,17 +17,22 @@ from typing import List, Dict, Any from ascii_colors import ASCIIColors, trace_exception from lollms.paths import LollmsPaths import subprocess - +import pipmaster as pm try: - if not PackageManager.check_package_installed("whisper"): - PackageManager.install_package("openai-whisper") + if not pm.is_installed("openai-whisper"): + pm.install("openai-whisper") try: install_conda_package("conda-forge::ffmpeg") except Exception as ex: trace_exception(ex) ASCIIColors.red("Couldn't install ffmpeg") except: - PackageManager.install_package("git+https://github.com/openai/whisper.git") + try: + install_conda_package("conda-forge::ffmpeg") + except Exception as ex: + trace_exception(ex) + ASCIIColors.red("Couldn't install ffmpeg") + pm.install("git+https://github.com/openai/whisper.git") import whisper @@ -41,12 +46,20 @@ class LollmsWhisper(LollmsSTT): output_path=None ): super().__init__("whisper",app, model, output_path) - self.whisper = whisper.load_model(model) + try: + self.whisper = whisper.load_model(model) + except: + ASCIIColors.red("Couldn't load whisper model!\nWhisper will be disabled") + self.whisper = None self.ready = True def transcribe( self, wave_path: str|Path )->str: - result = self.whisper.transcribe(str(wave_path)) - return result["text"] + if self.whisper: + result = self.whisper.transcribe(str(wave_path)) + return result["text"] + else: + ASCIIColors.error("Whisper is broken") + return "" \ No newline at end of file diff --git a/lollms/services/tts/xtts/lollms_xtts.py b/lollms/services/tts/xtts/lollms_xtts.py index ce91ac6..19b22e2 100644 --- a/lollms/services/tts/xtts/lollms_xtts.py +++ b/lollms/services/tts/xtts/lollms_xtts.py @@ -34,6 +34,7 @@ import 
simpleaudio as sa import time from queue import Queue import re +import pipmaster as pm # List of common sampling rates common_sampling_rates = [8000, 11025, 16000, 22050, 32000, 44100, 48000, 96000, 192000] @@ -42,6 +43,9 @@ common_sampling_rates = [8000, 11025, 16000, 22050, 32000, 44100, 48000, 96000, def closest_sampling_rate(freq, common_rates): return min(common_rates, key=lambda x: abs(x - freq)) +def xtts_install(): + pm.install_or_update("tts", force_reinstall=True) + class LollmsXTTS(LollmsTTS): def __init__(self, app: LollmsApplication, voices_folders: List[str|Path], freq = 22050): super().__init__("lollms_xtts", app) diff --git a/tests/endoints_unit_tests/components_test/whisper_test.py b/tests/endoints_unit_tests/components_test/whisper_test.py new file mode 100644 index 0000000..91e9043 --- /dev/null +++ b/tests/endoints_unit_tests/components_test/whisper_test.py @@ -0,0 +1,21 @@ +# Title LollmsWhisper +# Licence: MIT +# Author : Paris Neo +# + +from pathlib import Path +import whisper + + +if __name__ == "__main__": + # Create a mock LollmsApplication instance + w = whisper.load_model("small") + # Example usage + audio_file_path = Path(r"E:\lollms\custom_voices\ParisNeo_Original_voice.wav") + + if audio_file_path.exists(): + transcription = w.transcribe(str(audio_file_path)) + print("Transcription:") + print(transcription) + else: + print(f"Audio file not found: {audio_file_path}")