mirror of
https://github.com/ParisNeo/lollms.git
synced 2025-03-26 05:37:41 +00:00
New core upgrades
This commit is contained in:
parent
26c1b62f04
commit
91093246d2
@ -13,6 +13,7 @@ from typing import Callable
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from functools import partial
|
||||
from flask_socketio import SocketIO
|
||||
import subprocess
|
||||
import importlib
|
||||
import sys
|
||||
@ -29,10 +30,12 @@ class LollmsApplication(LoLLMsCom):
|
||||
try_select_binding=False,
|
||||
try_select_model=False,
|
||||
callback=None,
|
||||
socketio:SocketIO=None
|
||||
) -> None:
|
||||
"""
|
||||
Creates a LOLLMS Application
|
||||
"""
|
||||
super().__init__(socketio)
|
||||
self.app_name = app_name
|
||||
self.config = config
|
||||
self.lollms_paths = lollms_paths
|
||||
|
@ -70,21 +70,13 @@ class LLMBinding:
|
||||
|
||||
self.lollmsCom = lollmsCom
|
||||
|
||||
self.add_default_configurations(binding_config)
|
||||
|
||||
binding_config.addConfigs([
|
||||
{"name":"clip_model_name","type":"str","value":'ViT-L-14/openai','options':["ViT-L-14/openai","ViT-H-14/laion2b_s32b_b79k"], "help":"Clip model to be used for images understanding"},
|
||||
{"name":"caption_model_name","type":"str","value":'blip-large','options':['blip-base', 'git-large-coco', 'blip-large','blip2-2.7b', 'blip2-flan-t5-xl'], "help":"Clip model to be used for images understanding"},
|
||||
{"name":"vqa_model_name","type":"str","value":'Salesforce/blip-vqa-capfilt-large','options':['Salesforce/blip-vqa-capfilt-large', 'Salesforce/blip-vqa-base', 'Salesforce/blip-image-captioning-large','Salesforce/blip2-opt-2.7b', 'Salesforce/blip2-flan-t5-xxl'], "help":"Salesforce question/answer model"},
|
||||
|
||||
])
|
||||
self.interrogatorStorer = None
|
||||
self.supported_file_extensions = supported_file_extensions
|
||||
self.seed = config["seed"]
|
||||
|
||||
self.configuration_file_path = lollms_paths.personal_configuration_path/"bindings"/self.binding_folder_name/f"config.yaml"
|
||||
self.configuration_file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
self.binding_config.config.file_path = self.configuration_file_path
|
||||
|
||||
self.sync_configuration(self.binding_config, lollms_paths)
|
||||
# Installation
|
||||
if (not self.configuration_file_path.exists() or installation_option==InstallOption.FORCE_INSTALL) and installation_option!=InstallOption.NEVER_INSTALL:
|
||||
self.install()
|
||||
@ -103,8 +95,20 @@ class LLMBinding:
|
||||
for models_folder in self.models_folders:
|
||||
models_folder.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
def sync_configuration(self, binding_config:"TypedConfig", lollms_paths:"LollmsPaths"):
    """Point the binding configuration at its on-disk YAML file.

    Builds the per-binding configuration path under the user's personal
    configuration directory, ensures the parent folder exists, and stores
    the path both on this instance and on the TypedConfig so later
    load/save calls target the right file.

    Args:
        binding_config: The binding's TypedConfig whose file path is synced.
        lollms_paths: Paths object providing personal_configuration_path.
    """
    # Plain literal: the original used an f-string with nothing to interpolate.
    self.configuration_file_path = lollms_paths.personal_configuration_path/"bindings"/self.binding_folder_name/"config.yaml"
    self.configuration_file_path.parent.mkdir(parents=True, exist_ok=True)
    binding_config.config.file_path = self.configuration_file_path
|
||||
|
||||
|
||||
|
||||
def add_default_configurations(self, binding_config:"TypedConfig"):
    """Register the default vision-related configuration entries.

    Adds the CLIP, image-captioning and visual-question-answering model
    selections to the binding configuration so every binding exposes the
    same image-understanding options.

    Args:
        binding_config: The TypedConfig to extend with the default entries.
    """
    binding_config.addConfigs([
        {"name":"clip_model_name","type":"str","value":'ViT-L-14/openai','options':["ViT-L-14/openai","ViT-H-14/laion2b_s32b_b79k"], "help":"Clip model to be used for images understanding"},
        # Fixed copy-pasted help text: this entry selects the captioning model, not the CLIP model.
        {"name":"caption_model_name","type":"str","value":'blip-large','options':['blip-base', 'git-large-coco', 'blip-large','blip2-2.7b', 'blip2-flan-t5-xl'], "help":"Caption model to be used for images understanding"},
        {"name":"vqa_model_name","type":"str","value":'Salesforce/blip-vqa-capfilt-large','options':['Salesforce/blip-vqa-capfilt-large', 'Salesforce/blip-vqa-base', 'Salesforce/blip-image-captioning-large','Salesforce/blip2-opt-2.7b', 'Salesforce/blip2-flan-t5-xxl'], "help":"Salesforce question/answer model"},
    ])
|
||||
|
||||
def InfoMessage(self, content, duration:int=4, client_id=None, verbose:bool=True):
    """Relay an information message through the attached communicator.

    Delegates to self.lollmsCom.InfoMessage when a communicator is set;
    with no communicator the message is dropped and None is returned.
    """
    com = self.lollmsCom
    if not com:
        return None
    return com.InfoMessage(content=content, duration=duration, client_id=client_id, verbose=verbose)
|
||||
|
@ -1,4 +1,5 @@
|
||||
from ascii_colors import ASCIIColors
|
||||
from flask_socketio import SocketIO
|
||||
from enum import Enum
|
||||
class NotificationType(Enum):
|
||||
"""Notification types."""
|
||||
@ -24,10 +25,13 @@ class NotificationDisplayType(Enum):
|
||||
MESSAGE_BOX = 1
|
||||
"""This is a message box."""
|
||||
|
||||
YESNO_MESSAGE = 2
|
||||
"""This is a yes not messagebox."""
|
||||
|
||||
|
||||
class LoLLMsCom:
|
||||
def __init__(self) -> None:
|
||||
pass
|
||||
def __init__(self, socketio:SocketIO=None) -> None:
    """Create a communicator bound to an optional SocketIO server.

    Args:
        socketio: Flask-SocketIO instance used to push notifications to
            connected clients; may be None for headless/console use.
    """
    self.socketio= socketio
|
||||
def InfoMessage(self, content, duration:int=4, client_id=None, verbose:bool=True):
|
||||
self.notify(
|
||||
content,
|
||||
@ -38,6 +42,30 @@ class LoLLMsCom:
|
||||
verbose=verbose
|
||||
)
|
||||
|
||||
def YesNoMessage(self, content, duration:int=4, client_id=None, verbose:bool=True):
    """Ask the client a yes/no question and block until it answers.

    Emits a YESNO_MESSAGE notification via self.notify, then waits in a
    socketio.sleep loop until the client replies on the 'yesNoRes' event,
    and returns the boolean answer.

    NOTE(review): there is no timeout — if the client never emits
    'yesNoRes' this call blocks forever; confirm that is acceptable.
    NOTE(review): a new 'yesNoRes' handler is registered on every call
    and never unregistered.
    """
    # Mutable dict shared between this method and the event handler closure.
    infos={
        "wait":True,
        "result":False
    }
    # Per-call handler: records the client's answer and releases the wait loop.
    @self.socketio.on('yesNoRes')
    def yesnores(result):
        infos["result"] = result["yesRes"]
        infos["wait"]=False

    # NOTIF_SUCCESS is used as the notification type even for a question —
    # presumably intentional styling; verify against the UI.
    self.notify(
        content,
        notification_type=NotificationType.NOTIF_SUCCESS,
        duration=duration,
        client_id=client_id,
        display_type=NotificationDisplayType.YESNO_MESSAGE,
        verbose=verbose
    )
    # wait
    ASCIIColors.yellow("Waiting for yes no question to be answered")
    while infos["wait"]:
        # socketio.sleep (not time.sleep) so the server can process the reply.
        self.socketio.sleep(1)
    return infos["result"]
|
||||
|
||||
def info(self, content, duration:int=4, client_id=None, verbose:bool=True):
|
||||
self.notify(
|
||||
content,
|
||||
|
@ -21,9 +21,10 @@ if not PackageManager.check_package_installed("cv2"):
|
||||
elif platform.system() == "Windows":
|
||||
os.system('pip install opencv-python')
|
||||
else:
|
||||
os.system('sudo apt-get update')
|
||||
os.system('sudo apt-get install libgl1-mesa-glx python3-opencv -y')
|
||||
os.system('pip install opencv-python')
|
||||
# os.system('sudo apt-get update')
|
||||
# os.system('sudo apt-get install libgl1-mesa-glx python3-opencv -y')
|
||||
# os.system('pip install opencv-python')
|
||||
try:
|
||||
import cv2
|
||||
except:
|
||||
@ -53,17 +54,18 @@ import io
|
||||
import numpy as np
|
||||
try:
|
||||
if not PackageManager.check_package_installed("sounddevice"):
|
||||
os.system("sudo apt-get install portaudio19-dev")
|
||||
# os.system("sudo apt-get install portaudio19-dev")
|
||||
PackageManager.install_package("sounddevice")
|
||||
PackageManager.install_package("wave")
|
||||
except:
|
||||
os.system("sudo apt-get install portaudio19-dev -y")
|
||||
# os.system("sudo apt-get install portaudio19-dev -y")
|
||||
PackageManager.install_package("sounddevice")
|
||||
PackageManager.install_package("wave")
|
||||
|
||||
import sounddevice as sd
|
||||
import wave
|
||||
|
||||
try:
|
||||
import sounddevice as sd
|
||||
import wave
|
||||
except:
|
||||
ASCIIColors.error("Couldn't load sound tools")
|
||||
class AudioRecorder:
|
||||
def __init__(self, socketio, filename, channels=1, sample_rate=16000, chunk_size=24678, silence_threshold=150.0, silence_duration=2, callback=None, lollmsCom=None):
|
||||
try:
|
||||
|
Loading…
x
Reference in New Issue
Block a user