From 462cd27e4ff6453ccb056a3ca191a3a731547472 Mon Sep 17 00:00:00 2001
From: saloui
Date: Fri, 30 Jun 2023 16:24:41 +0200
Subject: [PATCH 1/5] fixed

---
 web/src/components/MarkdownRenderer.vue | 1 -
 1 file changed, 1 deletion(-)

diff --git a/web/src/components/MarkdownRenderer.vue b/web/src/components/MarkdownRenderer.vue
index 3958a8eb..0efa4be9 100644
--- a/web/src/components/MarkdownRenderer.vue
+++ b/web/src/components/MarkdownRenderer.vue
@@ -15,7 +15,6 @@
 import 'highlight.js/styles/tokyo-night-dark.css';
 
 import hljs from 'highlight.js/lib/common';
-
 import 'highlight.js/styles/tomorrow-night-blue.css';
 import 'highlight.js/styles/tokyo-night-dark.css';
 import attrs from 'markdown-it-attrs';

From 3c38f47f47eff838d1eef3f652f64b96c2d8a6ff Mon Sep 17 00:00:00 2001
From: Saifeddine ALOUI
Date: Fri, 30 Jun 2023 19:11:09 +0200
Subject: [PATCH 2/5] removed unnecessary stuff

---
 api/binding.py   | 88 ------------------------------------------------
 api/langchain.py | 84 ---------------------------------------------
 2 files changed, 172 deletions(-)
 delete mode 100644 api/binding.py
 delete mode 100644 api/langchain.py

diff --git a/api/binding.py b/api/binding.py
deleted file mode 100644
index d97ee0a1..00000000
--- a/api/binding.py
+++ /dev/null
@@ -1,88 +0,0 @@
-######
-# Project : lollms-webui
-# File : binding.py
-# Author : ParisNeo with the help of the community
-# Supported by Nomic-AI
-# license : Apache 2.0
-# Description :
-# This is an interface class for lollms-webui bindings.
-######
-from pathlib import Path
-from typing import Callable
-import inspect
-import yaml
-import sys
-
-__author__ = "parisneo"
-__github__ = "https://github.com/ParisNeo/lollms-webui"
-__copyright__ = "Copyright 2023, "
-__license__ = "Apache 2.0"
-
-
-class LLMBinding:
-
-    file_extension='*.bin'
-    binding_path = Path(__file__).parent
-    def __init__(self, config:dict, inline:bool) -> None:
-        self.config = config
-        self.inline = inline
-
-
-    def generate(self,
-                 prompt:str,
-                 n_predict: int = 128,
-                 new_text_callback: Callable[[str], None] = None,
-                 verbose: bool = False,
-                 **gpt_params ):
-        """Generates text out of a prompt
-        This should ber implemented by child class
-
-        Args:
-            prompt (str): The prompt to use for generation
-            n_predict (int, optional): Number of tokens to prodict. Defaults to 128.
-            new_text_callback (Callable[[str], None], optional): A callback function that is called everytime a new text element is generated. Defaults to None.
-            verbose (bool, optional): If true, the code will spit many informations about the generation process. Defaults to False.
-        """
-        pass
-    def tokenize(self, prompt):
-        """
-        Tokenizes the given prompt using the model's tokenizer.
-
-        Args:
-            prompt (str): The input prompt to be tokenized.
-
-        Returns:
-            list: A list of tokens representing the tokenized prompt.
-        """
-        pass
-
-    def detokenize(self, tokens_list):
-        """
-        Detokenizes the given list of tokens using the model's tokenizer.
-
-        Args:
-            tokens_list (list): A list of tokens to be detokenized.
-
-        Returns:
-            str: The detokenized text as a string.
-        """
-        pass
-
-    @staticmethod
-    def list_models(config:dict):
-        """Lists the models for this binding
-        """
-        models_dir = Path('./models')/config["binding_name"]  # replace with the actual path to the models folder
-        return [f.name for f in models_dir.glob(LLMBinding.file_extension)]
-
-    @staticmethod
-    def get_available_models():
-        # Create the file path relative to the child class's directory
-        binding_path = Path(__file__).parent
-        file_path = binding_path/"models.yaml"
-
-        with open(file_path, 'r') as file:
-            yaml_data = yaml.safe_load(file)
-
-        return yaml_data
-
diff --git a/api/langchain.py b/api/langchain.py
deleted file mode 100644
index c97e30b5..00000000
--- a/api/langchain.py
+++ /dev/null
@@ -1,84 +0,0 @@
-try:
-    from langchain.llms.base import LLM
-except ImportError:
-    raise ImportError(
-        'To use the ctransformers.langchain module, please install the '
-        '`langchain` python package: `pip install langchain`')
-
-from typing import Any, Dict, Optional, Sequence
-
-from pydantic import root_validator
-from langchain.callbacks.manager import CallbackManagerForLLMRun
-
-from api.binding import LLMBinding
-
-
-class GenericBinding(LLM):
-    """Wrapper around All compatible LLM interfaces.
-    Thanks to Marella for providing the base for this work.
-    To follow him, here is his github profile:
-
-    To use, you should have the `langchain` python package installed.
-    """
-
-    client: Any  #: :meta private:
-
-    model: str
-    """The path to a model file or directory or the name of a Hugging Face Hub
-    model repo."""
-
-    model_type: Optional[str] = None
-    """The model type."""
-
-    model_file: Optional[str] = None
-    """The name of the model file in repo or directory."""
-
-    config: Optional[Dict[str, Any]] = None
-    """The config parameters."""
-
-    lib: Optional[Any] = None
-    """The path to a shared library or one of `avx2`, `avx`, `basic`."""
-
-    @property
-    def _identifying_params(self) -> Dict[str, Any]:
-        """Get the identifying parameters."""
-        return {
-            'model': self.model,
-            'model_type': self.model_type,
-            'model_file': self.model_file,
-            'config': self.config,
-        }
-
-    @property
-    def _llm_type(self) -> str:
-        """Return type of llm."""
-        return 'generic_binding'
-
-    @root_validator()
-    def validate_environment(cls, values: Dict) -> Dict:
-        """Validate and load model from a local file or remote repo."""
-        config = values['config'] or {}
-        values['client'] = LLMBinding(config, True)
-        return values
-
-    def _call(
-        self,
-        prompt: str,
-        stop: Optional[Sequence[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-    ) -> str:
-        """Generate text from a prompt.
-
-        Args:
-            prompt: The prompt to generate text from.
-            stop: A list of sequences to stop generation when encountered.
-
-        Returns:
-            The generated text.
-        """
-        text = []
-        for chunk in self.client(prompt, stop=stop, stream=True):
-            text.append(chunk)
-            if run_manager:
-                run_manager.on_llm_new_token(chunk, verbose=self.verbose)
-        return ''.join(text)
\ No newline at end of file

From 822b6cc61f7c4effcb27e5ad04f942252ae1cb46 Mon Sep 17 00:00:00 2001
From: Saifeddine ALOUI
Date: Fri, 30 Jun 2023 22:09:47 +0200
Subject: [PATCH 3/5] enhanced logging of exceptions

---
 api/__init__.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/api/__init__.py b/api/__init__.py
index be313d08..59b39e73 100644
--- a/api/__init__.py
+++ b/api/__init__.py
@@ -28,6 +28,7 @@ import traceback
 import sys
 from lollms.console import MainMenu
 import urllib
+import traceback
 
 __author__ = "parisneo"
 __github__ = "https://github.com/ParisNeo/lollms-webui"
@@ -719,6 +720,8 @@ class LoLLMsAPPI():
             1 : a notification message
             2 : A hidden message
         """
+        if message_type == MSG_TYPE.MSG_TYPE_STEP:
+            ASCIIColors.info("--> Step:"+chunk)
         if message_type == MSG_TYPE.MSG_TYPE_STEP_START:
             ASCIIColors.info("--> Step started:"+chunk)
         if message_type == MSG_TYPE.MSG_TYPE_STEP_END:
@@ -797,7 +800,12 @@ class LoLLMsAPPI():
                 output = self.personality.processor.run_workflow( prompt, full_prompt, self.process_chunk)
                 self.process_chunk(output, MSG_TYPE.MSG_TYPE_FULL)
             except Exception as ex:
+                # Catch the exception and get the traceback as a list of strings
+                traceback_lines = traceback.format_exception(type(ex), ex, ex.__traceback__)
+                # Join the traceback lines into a single string
+                traceback_text = ''.join(traceback_lines)
                 ASCIIColors.error(f"Workflow run failed.\nError:{ex}")
+                ASCIIColors.error(traceback_text)
                 self.process_chunk(f"Workflow run failed\nError:{ex}", MSG_TYPE.MSG_TYPE_EXCEPTION)
             print("Finished executing the workflow")
             return

From deb2d2d9d2a51efdd854cb302e7bd31cce37035b Mon Sep 17 00:00:00 2001
From: Saifeddine ALOUI
Date: Fri, 30 Jun 2023 23:29:41 +0200
Subject: [PATCH 4/5] upgraded exception reporting

---
 app.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/app.py b/app.py
index e00cb854..bdae389e 100644
--- a/app.py
+++ b/app.py
@@ -507,7 +507,13 @@
             try:
                 self.model = self.binding.build_model()
             except Exception as ex:
-                print(f"Couldn't load model: [{ex}]")
+                # Catch the exception and get the traceback as a list of strings
+                traceback_lines = traceback.format_exception(type(ex), ex, ex.__traceback__)
+
+                # Join the traceback lines into a single string
+                traceback_text = ''.join(traceback_lines)
+                ASCIIColors.error(f"Couldn't load model: [{ex}]")
+                ASCIIColors.error(traceback_text)
                 return jsonify({ "status":False, 'error':str(ex)})
             print("update_settings : New model selected")
 

From 510aa8dba9384a7fd8fa816742f54672abaadadc Mon Sep 17 00:00:00 2001
From: Saifeddine ALOUI
Date: Sat, 1 Jul 2023 00:14:51 +0200
Subject: [PATCH 5/5] bugfix

---
 api/__init__.py | 112 +++++++++++++++++++++++++++++-------------------
 1 file changed, 69 insertions(+), 43 deletions(-)

diff --git a/api/__init__.py b/api/__init__.py
index 59b39e73..71e1e12b 100644
--- a/api/__init__.py
+++ b/api/__init__.py
@@ -445,6 +445,19 @@ class LoLLMsAPPI():
             ASCIIColors.green(f"{self.lollms_paths.personal_path}")
+        @socketio.on('continue_generate_msg_from')
+        def handle_connection(data):
+            message_id = int(data['id'])
+            message = data["prompt"]
+            self.current_user_message_id = message_id
+            tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))
+            tpe.start()
+        # generation status
+        self.generating=False
+        
ASCIIColors.blue(f"Your personal data is stored here :",end="") + ASCIIColors.green(f"{self.lollms_paths.personal_path}") + + def rebuild_personalities(self): loaded = self.mounted_personalities loaded_names = [f"{p.language}/{p.category}/{p.personality_folder_name}" for p in loaded] @@ -646,7 +659,7 @@ class LoLLMsAPPI(): self.condition_chatbot() return timestamp - def prepare_query(self, message_id=-1): + def prepare_query(self, message_id=-1, is_continue=False): messages = self.current_discussion.get_messages() self.full_message_list = [] for message in messages: @@ -659,17 +672,20 @@ class LoLLMsAPPI(): else: break - if self.personality.processor is not None: - preprocessed_prompt = self.personality.processor.process_model_input(message["content"]) - else: - preprocessed_prompt = message["content"] - if preprocessed_prompt is not None: - self.full_message_list.append(self.personality.user_message_prefix+preprocessed_prompt+self.personality.link_text+self.personality.ai_message_prefix) - else: - self.full_message_list.append(self.personality.user_message_prefix+message["content"]+self.personality.link_text+self.personality.ai_message_prefix) - - link_text = self.personality.link_text + if not is_continue: + if self.personality.processor is not None: + preprocessed_prompt = self.personality.processor.process_model_input(message["content"]) + else: + preprocessed_prompt = message["content"] + + if preprocessed_prompt is not None: + self.full_message_list.append(self.personality.user_message_prefix+preprocessed_prompt+self.personality.link_text+self.personality.ai_message_prefix) + else: + self.full_message_list.append(self.personality.user_message_prefix+message["content"]+self.personality.link_text+self.personality.ai_message_prefix) + else: + self.full_message_list.append(self.personality.ai_message_prefix+message["content"]) + discussion_messages = self.personality.personality_conditioning+ link_text.join(self.full_message_list) @@ -851,41 +867,44 @@ class LoLLMsAPPI(): output = "" return output - def start_message_generation(self, message, message_id): + def start_message_generation(self, message, message_id, is_continue=False): ASCIIColors.info(f"Text generation requested by client: {self.current_room_id}") # send the message to the bot print(f"Received message : {message}") if self.current_discussion: # First we need to send the new message ID to the client - self.current_ai_message_id = self.current_discussion.add_message( - self.personality.name, - "", - parent = self.current_user_message_id, - binding = self.config["binding_name"], - model = self.config["model_name"], - personality = self.config["personalities"][self.config["active_personality_id"]] - ) # first the content is empty, but we'll fill it at the end - self.socketio.emit('infos', - { - "status":'generation_started', - "type": "input_message_infos", - "bot": self.personality.name, - "user": self.personality.user_name, - "message":message,#markdown.markdown(message), - "user_message_id": self.current_user_message_id, - "ai_message_id": self.current_ai_message_id, + if is_continue: + self.current_ai_message_id = message_id + else: + self.current_ai_message_id = self.current_discussion.add_message( + self.personality.name, + "", + parent = self.current_user_message_id, + binding = self.config["binding_name"], + model = self.config["model_name"], + personality = self.config["personalities"][self.config["active_personality_id"]] + ) # first the content is empty, but we'll fill it at the end + self.socketio.emit('infos', + { 
+                        "status":'generation_started',
+                        "type": "input_message_infos",
+                        "bot": self.personality.name,
+                        "user": self.personality.user_name,
+                        "message":message,#markdown.markdown(message),
+                        "user_message_id": self.current_user_message_id,
+                        "ai_message_id": self.current_ai_message_id,
 
-                        'binding': self.current_discussion.current_message_binding,
-                        'model': self.current_discussion.current_message_model,
-                        'personality': self.current_discussion.current_message_personality,
-                        'created_at': self.current_discussion.current_message_created_at,
-                        'finished_generating_at': self.current_discussion.current_message_finished_generating_at,
-                    }, room=self.current_room_id
+                        'binding': self.current_discussion.current_message_binding,
+                        'model': self.current_discussion.current_message_model,
+                        'personality': self.current_discussion.current_message_personality,
+                        'created_at': self.current_discussion.current_message_created_at,
+                        'finished_generating_at': self.current_discussion.current_message_finished_generating_at,
+                    }, room=self.current_room_id
             )
             self.socketio.sleep(0)
 
             # prepare query and reception
-            self.discussion_messages, self.current_message = self.prepare_query(message_id)
+            self.discussion_messages, self.current_message = self.prepare_query(message_id, is_continue)
             self.prepare_reception()
             self.generating = True
             self.generate(self.discussion_messages, self.current_message, n_predict = self.config['n_predict'], callback=self.process_chunk)
@@ -921,14 +940,21 @@ class LoLLMsAPPI():
 
             )
             self.socketio.sleep(0)
-            print()
-            print("## Done ##")
-            print()
+            ASCIIColors.success(f" ╔══════════════════════════════════════════════════╗ ")
+            ASCIIColors.success(f" ║                        Done                        ║ ")
+            ASCIIColors.success(f" ╚══════════════════════════════════════════════════╝ ")
         else:
-            #No discussion available
-            print("No discussion selected!!!")
-            print("## Done ##")
-            print()
             self.cancel_gen = False
+            #No discussion available
+            ASCIIColors.warning("No discussion selected!!!")
+            self.socketio.emit('message', {
+                            'data': "No discussion selected!!!",
+                            'user_message_id':self.current_user_message_id,
+                            'ai_message_id':self.current_ai_message_id,
+                            'discussion_id':0,
+                            'message_type': MSG_TYPE.MSG_TYPE_EXCEPTION.value
+                        }, room=self.current_room_id
+            )
+            print()
 
         return ""
\ No newline at end of file