diff --git a/api/__init__.py b/api/__init__.py
index dad61f3e..e0233da2 100644
--- a/api/__init__.py
+++ b/api/__init__.py
@@ -16,10 +16,11 @@ from lollms.extension import LOLLMSExtension, ExtensionBuilder
 from lollms.personality import AIPersonality, PersonalityBuilder
 from lollms.binding import LOLLMSConfig, BindingBuilder, LLMBinding, ModelBuilder, BindingType
 from lollms.paths import LollmsPaths
-from lollms.helpers import ASCIIColors, trace_exception, NotificationType, NotificationDisplayType
+from lollms.helpers import ASCIIColors, trace_exception
+from lollms.com import NotificationType, NotificationDisplayType, LoLLMsCom
 from lollms.app import LollmsApplication
 from lollms.utilities import File64BitsManager, PromptReshaper
-from lollms.media import WebcamImageSender
+from lollms.media import WebcamImageSender, AudioRecorder
 from safe_store import TextVectorizer, VectorizationMethod, VisualizationMethod
 import threading
 from tqdm import tqdm
@@ -116,7 +117,7 @@ class LoLLMsAPI(LollmsApplication):
     def __init__(self, config:LOLLMSConfig, socketio, config_file_path:str, lollms_paths: LollmsPaths) -> None:
 
         self.socketio = socketio
-        super().__init__("Lollms_webui",config, lollms_paths, callback=self.process_chunk, notification_callback=self.notify)
+        super().__init__("Lollms_webui",config, lollms_paths, callback=self.process_chunk)
 
         self.busy = False
 
@@ -178,9 +179,19 @@ class LoLLMsAPI(LollmsApplication):
                 "first_chunk": True,
             }
         }
-
-        self.webcam = WebcamImageSender(socketio)
-
+        try:
+            self.webcam = WebcamImageSender(socketio)
+        except Exception:
+            # the webcam sender is optional; keep the app usable without it
+            self.webcam = None
+        try:
+            rec_output_folder = lollms_paths.personal_outputs_path/"audio_rec"
+            rec_output_folder.mkdir(exist_ok=True, parents=True)
+            self.audio_cap = AudioRecorder(socketio,rec_output_folder/"rt.wav")
+        except Exception:
+            # define audio_cap even if recorder init fails, to avoid AttributeError later
+            self.audio_cap = None
+            rec_output_folder = None
         # =========================================================================================
         # Socket IO stuff
         # =========================================================================================
@@ -221,7 +229,11 @@ class LoLLMsAPI(LollmsApplication):
         @socketio.on('stop_webcam_video_stream')
         def stop_webcam_video_stream():
             self.webcam.stop_capture()
-
+
+        @socketio.on('start_webcam_video_stream')
+        def start_webcam_video_stream():
+            self.webcam.start_capture()
+
 
         @socketio.on('upgrade_vectorization')
         def upgrade_vectorization():
@@ -711,20 +727,20 @@ class LoLLMsAPI(LollmsApplication):
             print(f"Creating an empty message for AI answer orientation")
             if self.connections[client_id]["current_discussion"]:
                 if not self.model:
-                    self.notify("No model selected. Please make sure you select a model before starting generation", False, client_id)
+                    self.error("No model selected. Please make sure you select a model before starting generation", client_id=client_id)
                     return
                 self.new_message(client_id, self.config.user_name, "", sender_type=SENDER_TYPES.SENDER_TYPES_USER, open=True)
                 self.socketio.sleep(0.01)
         else:
             if self.personality is None:
-                self.notify("Select a personality",False,None)
+                self.warning("Select a personality")
                 return
             ASCIIColors.info(f"Building empty AI message requested by : {client_id}")
             # send the message to the bot
             print(f"Creating an empty message for AI answer orientation")
             if self.connections[client_id]["current_discussion"]:
                 if not self.model:
-                    self.notify("No model selected. Please make sure you select a model before starting generation", False, client_id)
+                    self.error("No model selected. Please make sure you select a model before starting generation", client_id=client_id)
                     return
                 self.new_message(client_id, self.personality.name, "[edit this to put your ai answer start]", open=True)
                 self.socketio.sleep(0.01)
@@ -900,7 +916,7 @@ class LoLLMsAPI(LollmsApplication):
                     self.personality.processor.callback = partial(self.process_chunk, client_id=client_id)
                     self.personality.processor.execute_command(command, parameters)
                 else:
-                    self.notify("Non scripted personalities do not support commands",False,client_id)
+                    self.warning("Non scripted personalities do not support commands",client_id=client_id)
                 self.close_message(client_id)
         @socketio.on('generate_msg')
         def generate_msg(data):
@@ -915,7 +931,7 @@ class LoLLMsAPI(LollmsApplication):
 
             if not self.model:
                 ASCIIColors.error("Model not selected. Please select a model")
-                self.notify("Model not selected. Please select a model", False, client_id)
+                self.error("Model not selected. Please select a model", client_id=client_id)
                 return
 
             if not self.busy:
@@ -946,7 +962,7 @@ class LoLLMsAPI(LollmsApplication):
                 #tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id, client_id))
                 #tpe.start()
             else:
-                self.notify("I am busy. Come back later.", False, client_id)
+                self.error("I am busy. Come back later.", client_id=client_id)
 
         @socketio.on('generate_msg_from')
         def generate_msg_from(data):
@@ -957,7 +973,7 @@ class LoLLMsAPI(LollmsApplication):
 
             if self.connections[client_id]["current_discussion"] is None:
                 ASCIIColors.warning("Please select a discussion")
-                self.notify("Please select a discussion first", False, client_id)
+                self.error("Please select a discussion first", client_id=client_id)
                 return
             id_ = data['id']
             generation_type = data.get('msg_type',None)
@@ -979,7 +995,7 @@ class LoLLMsAPI(LollmsApplication):
 
             if self.connections[client_id]["current_discussion"] is None:
                 ASCIIColors.yellow("Please select a discussion")
-                self.notify("Please select a discussion", False, client_id)
+                self.error("Please select a discussion", client_id=client_id)
                 return
             id_ = data['id']
             if id_==-1:
@@ -1495,15 +1511,14 @@ class LoLLMsAPI(LollmsApplication):
 
         return discussion_messages # Removes the last return
 
-
-
     def notify(
                 self,
                 content,
                 notification_type:NotificationType=NotificationType.NOTIF_SUCCESS,
-                duration=4,
+                duration:int=4,
                 client_id=None,
-                display_type:NotificationDisplayType=NotificationDisplayType.TOAST
+                display_type:NotificationDisplayType=NotificationDisplayType.TOAST,
+                verbose=True
             ):
         self.socketio.emit('notification', {
                                 'content': content,# self.connections[client_id]["generated_text"],
@@ -1513,6 +1528,16 @@ class LoLLMsAPI(LollmsApplication):
                             }, room=client_id
                             )
         self.socketio.sleep(0.01)
+        if verbose:
+            if notification_type==NotificationType.NOTIF_SUCCESS:
+                ASCIIColors.success(content)
+            elif notification_type==NotificationType.NOTIF_INFO:
+                ASCIIColors.info(content)
+            elif notification_type==NotificationType.NOTIF_WARNING:
+                ASCIIColors.warning(content)
+            else:
+                ASCIIColors.red(content)
+
 
     def new_message(self,
                     client_id,
@@ -1643,7 +1668,7 @@ class LoLLMsAPI(LollmsApplication):
         if chunk is None:
             return True
         if not client_id in list(self.connections.keys()):
-            self.notify("Connection lost",False, client_id)
+            self.error("Connection lost", client_id=client_id)
             return
         if message_type == MSG_TYPE.MSG_TYPE_STEP:
             ASCIIColors.info("--> Step:"+chunk)
@@ -1655,13 +1680,13 @@ class LoLLMsAPI(LollmsApplication):
             else:
                 ASCIIColors.error("--> Step ended:"+chunk)
         if message_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
-            self.notify(chunk,False, client_id)
+            self.error(chunk, client_id=client_id)
             ASCIIColors.error("--> Exception from personality:"+chunk)
         if message_type == MSG_TYPE.MSG_TYPE_WARNING:
-            self.notify(chunk,True, client_id)
+            self.warning(chunk,client_id=client_id)
             ASCIIColors.error("--> Exception from personality:"+chunk)
         if message_type == MSG_TYPE.MSG_TYPE_INFO:
-            self.notify(chunk,True, client_id)
+            self.info(chunk, client_id=client_id)
             ASCIIColors.info("--> Info:"+chunk)
         if message_type == MSG_TYPE.MSG_TYPE_UI:
             self.update_message(client_id, "", parameters, metadata, chunk, MSG_TYPE.MSG_TYPE_UI)
@@ -1828,14 +1853,14 @@ class LoLLMsAPI(LollmsApplication):
 
     def start_message_generation(self, message, message_id, client_id, is_continue=False, generation_type=None):
         if self.personality is None:
-            self.notify("Select a personality",False,None)
+            self.warning("Select a personality")
             return
         ASCIIColors.info(f"Text generation requested by client: {client_id}")
         # send the message to the bot
         print(f"Received message : {message.content}")
         if self.connections[client_id]["current_discussion"]:
             if not self.model:
-                self.notify("No model selected. Please make sure you select a model before starting generation", False, client_id)
+                self.error("No model selected. Please make sure you select a model before starting generation", client_id=client_id)
                 return
             # First we need to send the new message ID to the client
             if is_continue:
@@ -1894,7 +1919,7 @@ class LoLLMsAPI(LollmsApplication):
 
             #No discussion available
             ASCIIColors.warning("No discussion selected!!!")
-            self.notify("No discussion selected!!!",False, client_id)
+            self.error("No discussion selected!!!", client_id=client_id)
             print()
             self.busy=False
 
diff --git a/app.py b/app.py
index 2157be06..f205b3a6 100644
--- a/app.py
+++ b/app.py
@@ -13,7 +13,7 @@ __github__ = "https://github.com/ParisNeo/lollms-webui"
 __copyright__ = "Copyright 2023, "
 __license__ = "Apache 2.0"
 
-__version__ ="7.5 (Beta)"
+__version__ ="8.0 (Alpha)"
 main_repo = "https://github.com/ParisNeo/lollms-webui.git"
 
 import os
@@ -26,7 +26,7 @@ import time
 import traceback
 import webbrowser
 from pathlib import Path
-from lollms.helpers import NotificationType, NotificationDisplayType
+from lollms.com import NotificationType, NotificationDisplayType
 from lollms.utilities import AdvancedGarbageCollector, reinstall_pytorch_with_cuda
 def run_update_script(args=None):
     update_script = Path(__file__).parent/"update_script.py"
@@ -967,7 +967,7 @@ try:
                 for per in self.mounted_personalities:
                     per.model = None
                 gc.collect()
-                self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, InstallOption.INSTALL_IF_NECESSARY, self.notify)
+                self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, InstallOption.INSTALL_IF_NECESSARY, app=self)
                 self.model = None
                 self.config.save_config()
                 ASCIIColors.green("Binding loaded successfully")
@@ -996,7 +996,7 @@ try:
                         per.model = self.model
                 except Exception as ex:
                     trace_exception(ex)
-                    self.notify("It looks like you we couldn't load the model.\nThis can hapen when you don't have enough VRAM. Please restart the program.",False,30)
+                    self.InfoMessage("It looks like we couldn't load the model.\nThis can happen when you don't have enough VRAM. Please restart the program.",duration=30)
 
             else:
 
@@ -1307,10 +1307,10 @@ try:
                         save_db=True
                     )
                     ASCIIColors.yellow("1- Exporting discussions")
-                    self.notify("Exporting discussions")
+                    self.info("Exporting discussions")
                     discussions = self.db.export_all_as_markdown_list_for_vectorization()
                     ASCIIColors.yellow("2- Adding discussions to vectorizer")
-                    self.notify("Adding discussions to vectorizer")
+                    self.info("Adding discussions to vectorizer")
                     index = 0
                     nb_discussions = len(discussions)
 
@@ -1321,11 +1321,11 @@ try:
                         skill = self.learn_from_discussion(title, discussion)
                         self.long_term_memory.add_document(title, skill, chunk_size=self.config.data_vectorization_chunk_size, overlap_size=self.config.data_vectorization_overlap_size, force_vectorize=False, add_as_a_bloc=False)
                     ASCIIColors.yellow("3- Indexing database")
-                    self.notify("Indexing database",True, None)
+                    self.info("Indexing database")
                     self.long_term_memory.index()
                     ASCIIColors.yellow("Ready")
                 except Exception as ex:
-                    self.notify(f"Couldn't vectorize the database:{ex}",False, None)
+                    self.error(f"Couldn't vectorize the database:{ex}")
 
             return jsonify({"status":True})
@@ -1536,7 +1536,7 @@ try:
                 try:
                     data['name']=self.config.extensions[-1]
                 except Exception as ex:
-                    self.notify(ex,False)
+                    self.error(str(ex))
                     return
             try:
                 extension_path = self.lollms_paths.extensions_zoo_path / data['name']
@@ -1609,18 +1609,18 @@ try:
                 return jsonify({"status":False, 'error':str(e)})
             ASCIIColors.info(f"- Reinstalling binding {data['name']}...")
             try:
-                ASCIIColors.info("Unmounting binding and model")
-                ASCIIColors.info("Reinstalling binding")
+                self.info("Unmounting binding and model")
+                self.info("Reinstalling binding")
                 old_bn = self.config.binding_name
                 self.config.binding_name = data['name']
-                self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, InstallOption.FORCE_INSTALL, self.notify)
-                ASCIIColors.success("Binding installed successfully")
-                self.notify("Please reboot the application so that the binding installation can be taken into consideration",True, 30, notification_type=1)
+                self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, InstallOption.FORCE_INSTALL, app=self)
+                self.success("Binding installed successfully")
+                self.InfoMessage("Please reboot the application so that the binding installation can be taken into consideration")
                 del self.binding
                 self.binding = None
                 self.config.binding_name = old_bn
                 if old_bn is not None:
-                    self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, self.notify)
+                    self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, app=self)
                     self.model = self.binding.build_model()
                     for per in self.mounted_personalities:
                         per.model = self.model
@@ -1646,11 +1646,11 @@ try:
                 ASCIIColors.info("Reinstalling binding")
                 old_bn = self.config.binding_name
                 self.config.binding_name = data['name']
-                self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, InstallOption.FORCE_INSTALL, self.notify)
-                self.notify("Binding reinstalled successfully")
-                self.notify("Please reboot the application so that the binding installation can be taken into consideration", NotificationType.NOTIF_INFO , display_type=NotificationDisplayType.MESSAGE_BOX)
+                self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, InstallOption.FORCE_INSTALL, app=self)
+                self.success("Binding reinstalled successfully")
+                self.InfoMessage("Please reboot the application so that the binding installation can be taken into consideration")
                 self.config.binding_name = old_bn
-                self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, self.notify)
+                self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, app=self)
                 self.model = self.binding.build_model()
                 for per in self.mounted_personalities:
                     per.model = self.model
@@ -1665,7 +1665,7 @@ try:
                 data = request.get_json()
                 # Further processing of the data
             except Exception as e:
-                print(f"Error occurred while parsing JSON: {e}")
+                ASCIIColors.error(f"Error occurred while parsing JSON: {e}")
                 return jsonify({"status":False, 'error':str(e)})
             ASCIIColors.info(f"- Reinstalling binding {data['name']}...")
             try:
@@ -1677,12 +1677,12 @@ try:
                 ASCIIColors.info("Uninstalling binding")
                 old_bn = self.config.binding_name
                 self.config.binding_name = data['name']
-                self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, InstallOption.NEVER_INSTALL, self.notify)
+                self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, InstallOption.NEVER_INSTALL, app=self)
                 self.binding.uninstall()
                 ASCIIColors.green("Uninstalled successful")
                 if old_bn!=self.config.binding_name:
                     self.config.binding_name = old_bn
-                    self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, self.notify)
+                    self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, app=self)
                     self.model = self.binding.build_model()
                     for per in self.mounted_personalities:
                         per.model = self.model
@@ -1826,7 +1826,7 @@ try:
                     personality.model = None
                 gc.collect()
                 ASCIIColors.info("Reloading binding")
-                self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, self.notify)
+                self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, app=self)
                 ASCIIColors.info("Binding loaded successfully")
 
                 try:
diff --git a/lollms_core b/lollms_core
index ad328a8c..54591d66 160000
--- a/lollms_core
+++ b/lollms_core
@@ -1 +1 @@
-Subproject commit ad328a8c79f3e522955416b9ab747bdcfd01ab22
+Subproject commit 54591d660aaf93ee5f1cf25fd77e39cfe19f2a00
diff --git a/zoos/bindings_zoo b/zoos/bindings_zoo
index 1b6417ef..71cc23d7 160000
--- a/zoos/bindings_zoo
+++ b/zoos/bindings_zoo
@@ -1 +1 @@
-Subproject commit 1b6417ef75797fbfec8c896064b01a545a31e937
+Subproject commit 71cc23d774ca5618174e93788bc6ec4af9fe24fd
diff --git a/zoos/personalities_zoo b/zoos/personalities_zoo
index a4a71378..11b5c75d 160000
--- a/zoos/personalities_zoo
+++ b/zoos/personalities_zoo
@@ -1 +1 @@
-Subproject commit a4a71378a413b44c35c01368d457751d0b3e3fab
+Subproject commit 11b5c75d12d48fc91e9f66bed43684329bc314a0
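
Reviewer note: across api/__init__.py and app.py this change replaces raw self.notify(...) calls with leveled helpers (self.info, self.success, self.warning, self.error, self.InfoMessage) that the LoLLMsCom base class, now imported from lollms.com, is expected to provide; their implementations live in the lollms_core submodule bumped above. A minimal sketch of the assumed mapping onto notify(), inferred only from the call sites in this diff (the helper signatures are assumptions, not confirmed lollms_core API):

    # Sketch only: assumed LoLLMsCom convenience wrappers around notify(),
    # inferred from the call sites in this diff.
    def info(self, content, duration=4, client_id=None):
        self.notify(content, NotificationType.NOTIF_INFO, duration, client_id)
    def success(self, content, duration=4, client_id=None):
        self.notify(content, NotificationType.NOTIF_SUCCESS, duration, client_id)
    def warning(self, content, duration=4, client_id=None):
        self.notify(content, NotificationType.NOTIF_WARNING, duration, client_id)
    def error(self, content, duration=4, client_id=None):
        # NOTIF_ERROR assumed to exist; notify() renders it via its else branch (ASCIIColors.red)
        self.notify(content, NotificationType.NOTIF_ERROR, duration, client_id)
    def InfoMessage(self, content, duration=4, client_id=None):
        # blocking message box instead of the default toast
        self.notify(content, NotificationType.NOTIF_INFO, duration, client_id,
                    display_type=NotificationDisplayType.MESSAGE_BOX)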