mirror of
https://github.com/ParisNeo/lollms.git
synced 2024-12-18 12:26:29 +00:00
upgraded lollms
This commit is contained in:
parent
fe562543b4
commit
04500070eb
42
environment.yaml
Normal file
42
environment.yaml
Normal file
@ -0,0 +1,42 @@
|
||||
name: lollms_env
|
||||
channels:
|
||||
- defaults
|
||||
- conda-forge # Adds a wider selection of packages, especially for less common ones
|
||||
dependencies:
|
||||
- python=3.11
|
||||
- numpy=1.26.*
|
||||
- pandas
|
||||
- pillow>=9.5.0
|
||||
- pyyaml
|
||||
- requests
|
||||
- rich
|
||||
- scipy
|
||||
- tqdm
|
||||
- setuptools
|
||||
- wheel
|
||||
- psutil
|
||||
- pytest
|
||||
- gitpython
|
||||
- beautifulsoup4
|
||||
- packaging
|
||||
- fastapi
|
||||
- uvicorn
|
||||
- pydantic
|
||||
- selenium
|
||||
- aiofiles
|
||||
- pip # Conda will manage pip installation
|
||||
- pip:
|
||||
- colorama
|
||||
- ascii-colors>=0.4.2
|
||||
- python-multipart
|
||||
- python-socketio
|
||||
- python-socketio[client]
|
||||
- python-socketio[asyncio_client]
|
||||
- tiktoken
|
||||
- pipmaster>=0.1.7
|
||||
- lollmsvectordb>=1.1.0
|
||||
- freedom-search>=0.1.9
|
||||
- scrapemaster>=0.2.0
|
||||
- lollms_client>=0.7.5
|
||||
- zipfile36 # NOTE(review): backport of the stdlib zipfile module; presumably unnecessary on python 3.11 — confirm before removing
|
||||
# - freedom_search  # duplicate of freedom-search>=0.1.9 listed above; commented out
|
@ -244,7 +244,7 @@ async def upload_voice_file(file: UploadFile = File(...)):
|
||||
|
||||
# Save the file to disk or process it further
|
||||
contents = await file.read()
|
||||
safe_filename = f"voice_{file_path.name}"
|
||||
safe_filename = f"{file_path.name}"
|
||||
safe_file_path = lollmsElfServer.lollms_paths.custom_voices_path/safe_filename
|
||||
with safe_file_path.open("wb") as f:
|
||||
f.write(contents)
|
||||
|
52
lollms/server/endpoints/lollms_whisper.py
Normal file
52
lollms/server/endpoints/lollms_whisper.py
Normal file
@ -0,0 +1,52 @@
|
||||
"""
|
||||
project: lollms_webui
|
||||
file: lollms_whisper.py
|
||||
author: ParisNeo
|
||||
description:
|
||||
This module contains a set of FastAPI routes that provide information about the Lord of Large Language and Multimodal Systems (LoLLMs) Web UI
|
||||
application. These routes allow users to install the whisper speech-to-text service.
|
||||
|
||||
"""
|
||||
from fastapi import APIRouter, Request, UploadFile, File, HTTPException
|
||||
from fastapi.responses import PlainTextResponse
|
||||
from lollms_webui import LOLLMSWebUI
|
||||
from pydantic import BaseModel
|
||||
from starlette.responses import StreamingResponse
|
||||
from lollms.types import MSG_OPERATION_TYPE
|
||||
from lollms.main_config import BaseConfig
|
||||
from lollms.utilities import find_next_available_filename, output_file_path_to_url, detect_antiprompt, remove_text_from_string, trace_exception, find_first_available_file_index, add_period, PackageManager
|
||||
from lollms.security import sanitize_path, validate_path, check_access
|
||||
from pathlib import Path
|
||||
from ascii_colors import ASCIIColors
|
||||
import os
|
||||
import platform
|
||||
|
||||
# ----------------------- Defining router and main class ------------------------------
|
||||
|
||||
router = APIRouter()
|
||||
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()
|
||||
|
||||
class Identification(BaseModel):
|
||||
client_id: str
|
||||
|
||||
# ----------------------- voice ------------------------------
|
||||
@router.post("/install_whisper")
def install_whisper(data: Identification):
    """Install the whisper speech-to-text library on the server.

    Refuses to run in headless mode or when the server is reachable from
    outside (installation endpoints are local-admin only). Returns a dict
    with "status" and, on failure, an "error" message.
    """
    check_access(lollmsElfServer, data.client_id)
    try:
        # Security gates: never allow remote-triggered installs.
        if lollmsElfServer.config.headless_server_mode:
            return {"status":False,"error":"Service installation is blocked when in headless mode for obvious security reasons!"}

        if lollmsElfServer.config.host not in ("localhost", "127.0.0.1"):
            return {"status":False,"error":"Service installation is blocked when the server is exposed outside for very obvious reasons!"}

        lollmsElfServer.ShowBlockingMessage("Installing whisper library\nPlease stand by")
        # Local import so the service module is only loaded when actually needed.
        from lollms.services.stt.whisper.lollms_whisper import install_whisper as _do_install
        _do_install(lollmsElfServer)
        ASCIIColors.success("Done")
        lollmsElfServer.HideBlockingMessage()
        return {"status":True}
    except Exception as ex:
        lollmsElfServer.HideBlockingMessage()
        lollmsElfServer.InfoMessage(f"It looks like I could not install whisper because of this error:\n{ex}")
        return {"status":False, 'error':str(ex)}
|
52
lollms/server/endpoints/lollms_xtts.py
Normal file
52
lollms/server/endpoints/lollms_xtts.py
Normal file
@ -0,0 +1,52 @@
|
||||
"""
|
||||
project: lollms_webui
|
||||
file: lollms_xtts.py
|
||||
author: ParisNeo
|
||||
description:
|
||||
This module contains a set of FastAPI routes that provide information about the Lord of Large Language and Multimodal Systems (LoLLMs) Web UI
|
||||
application. These routes allow users to install the XTTS text-to-speech service.
|
||||
|
||||
"""
|
||||
from fastapi import APIRouter, Request, UploadFile, File, HTTPException
|
||||
from fastapi.responses import PlainTextResponse
|
||||
from lollms_webui import LOLLMSWebUI
|
||||
from pydantic import BaseModel
|
||||
from starlette.responses import StreamingResponse
|
||||
from lollms.types import MSG_OPERATION_TYPE
|
||||
from lollms.main_config import BaseConfig
|
||||
from lollms.utilities import find_next_available_filename, output_file_path_to_url, detect_antiprompt, remove_text_from_string, trace_exception, find_first_available_file_index, add_period, PackageManager
|
||||
from lollms.security import sanitize_path, validate_path, check_access
|
||||
from pathlib import Path
|
||||
from ascii_colors import ASCIIColors
|
||||
import os
|
||||
import platform
|
||||
|
||||
# ----------------------- Defining router and main class ------------------------------
|
||||
|
||||
router = APIRouter()
|
||||
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()
|
||||
|
||||
class Identification(BaseModel):
|
||||
client_id: str
|
||||
|
||||
# ----------------------- voice ------------------------------
|
||||
@router.post("/install_xtts")
def install_xtts(data: Identification):
    """Install the XTTS text-to-speech library on the server.

    Refuses to run in headless mode or when the server is reachable from
    outside (installation endpoints are local-admin only). Returns a dict
    with "status" and, on failure, an "error" message.
    """
    check_access(lollmsElfServer, data.client_id)
    try:
        # Security gates: never allow remote-triggered installs.
        if lollmsElfServer.config.headless_server_mode:
            return {"status":False,"error":"Service installation is blocked when in headless mode for obvious security reasons!"}

        if lollmsElfServer.config.host!="localhost" and lollmsElfServer.config.host!="127.0.0.1":
            return {"status":False,"error":"Service installation is blocked when the server is exposed outside for very obvious reasons!"}

        lollmsElfServer.ShowBlockingMessage("Installing XTTS library\nPlease stand by")
        # Local import so the service module is only loaded when actually needed.
        from lollms.services.tts.xtts.lollms_xtts import xtts_install
        xtts_install(lollmsElfServer)
        ASCIIColors.success("Done")
        lollmsElfServer.HideBlockingMessage()
        return {"status":True}
    except Exception as ex:
        lollmsElfServer.HideBlockingMessage()
        # Fixed typo: "XTT" -> "XTTS" in the user-facing failure message.
        lollmsElfServer.InfoMessage(f"It looks like I could not install XTTS because of this error:\n{ex}")
        return {"status":False, 'error':str(ex)}
|
@ -17,17 +17,22 @@ from typing import List, Dict, Any
|
||||
from ascii_colors import ASCIIColors, trace_exception
|
||||
from lollms.paths import LollmsPaths
|
||||
import subprocess
|
||||
|
||||
import pipmaster as pm
# Ensure openai-whisper is importable before the rest of this module loads.
# The scraped diff left both the old PackageManager-based code and the new
# pipmaster-based code in place; only the pipmaster version is kept here.
try:
    if not pm.is_installed("openai-whisper"):
        pm.install("openai-whisper")
        try:
            # whisper shells out to the ffmpeg binary for audio decoding
            install_conda_package("conda-forge::ffmpeg")
        except Exception as ex:
            trace_exception(ex)
            ASCIIColors.red("Couldn't install ffmpeg")
except Exception:
    # Best-effort fallback: install straight from the upstream repository.
    pm.install("git+https://github.com/openai/whisper.git")
|
||||
|
||||
|
||||
import whisper
|
||||
@ -41,12 +46,20 @@ class LollmsWhisper(LollmsSTT):
|
||||
output_path=None
|
||||
):
|
||||
super().__init__("whisper",app, model, output_path)
|
||||
self.whisper = whisper.load_model(model)
|
||||
try:
|
||||
self.whisper = whisper.load_model(model)
|
||||
except:
|
||||
ASCIIColors.red("Couldn't load whisper model!\nWhisper will be disabled")
|
||||
self.whisper = None
|
||||
self.ready = True
|
||||
|
||||
def transcribe(
        self,
        wave_path: str|Path
    )->str:
    """Transcribe the audio file at *wave_path* with the loaded whisper model.

    Returns the transcribed text, or "" when the model failed to load
    (self.whisper is None). The diff residue that duplicated the old,
    unguarded body after the new guarded one has been removed.
    """
    if self.whisper:
        result = self.whisper.transcribe(str(wave_path))
        return result["text"]
    else:
        ASCIIColors.error("Whisper is broken")
        return ""
|
@ -34,6 +34,7 @@ import simpleaudio as sa
|
||||
import time
|
||||
from queue import Queue
|
||||
import re
|
||||
import pipmaster as pm
|
||||
|
||||
# List of common sampling rates
|
||||
common_sampling_rates = [8000, 11025, 16000, 22050, 32000, 44100, 48000, 96000, 192000]
|
||||
@ -42,6 +43,9 @@ common_sampling_rates = [8000, 11025, 16000, 22050, 32000, 44100, 48000, 96000,
|
||||
def closest_sampling_rate(freq, common_rates):
    """Return the rate in *common_rates* nearest to *freq*.

    On a tie, the rate listed first wins (same tie-breaking as min()).
    """
    best = None
    for rate in common_rates:
        if best is None or abs(rate - freq) < abs(best - freq):
            best = rate
    return best
|
||||
|
||||
def xtts_install(app=None):
    """Install or upgrade the coqui "tts" package used by XTTS.

    app: optional application instance. Accepted (and currently unused)
    because the install endpoint invokes this as xtts_install(lollmsElfServer);
    the previous zero-argument signature made that call raise TypeError.
    """
    pm.install_or_update("tts", force_reinstall=True)
|
||||
|
||||
class LollmsXTTS(LollmsTTS):
|
||||
def __init__(self, app: LollmsApplication, voices_folders: List[str|Path], freq = 22050):
|
||||
super().__init__("lollms_xtts", app)
|
||||
|
21
tests/endoints_unit_tests/components_test/whisper_test.py
Normal file
21
tests/endoints_unit_tests/components_test/whisper_test.py
Normal file
@ -0,0 +1,21 @@
|
||||
# Title LollmsWhisper
|
||||
# Licence: MIT
|
||||
# Author : Paris Neo
|
||||
#
|
||||
|
||||
from pathlib import Path
|
||||
import whisper
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Manual smoke test: load the small whisper model and transcribe a
    # locally available sample recording.
    model = whisper.load_model("small")
    sample = Path(r"E:\lollms\custom_voices\ParisNeo_Original_voice.wav")

    if not sample.exists():
        print(f"Audio file not found: {sample}")
    else:
        print("Transcription:")
        print(model.transcribe(str(sample)))
|
Loading…
Reference in New Issue
Block a user