From b515a3864510532c83f5f8c5bf4dce6e16d9fe0f Mon Sep 17 00:00:00 2001 From: Saifeddine ALOUI Date: Mon, 9 Dec 2024 23:30:40 +0100 Subject: [PATCH] upgraded --- configs/config.yaml | 5 +++-- lollms/configs/config.yaml | 5 +++-- lollms/databases/discussions_database.py | 3 +++ lollms/databases/skills_database.py | 4 ++++ lollms/internet.py | 3 +++ lollms/personality.py | 11 ++++++++++- lollms/server/endpoints/lollms_file_system.py | 9 +++++++++ lollms/server/endpoints/lollms_rag.py | 3 +++ 8 files changed, 38 insertions(+), 5 deletions(-) diff --git a/configs/config.yaml b/configs/config.yaml index 515df20..d10478b 100644 --- a/configs/config.yaml +++ b/configs/config.yaml @@ -1,5 +1,5 @@ # =================== Lord Of Large Language Multimodal Systems Configuration file =========================== -version: 144 +version: 145 # video viewing and news recovering last_viewed_video: null @@ -280,7 +280,8 @@ audio_silenceTimer: 5000 # Data vectorization rag_databases: [] # This is the list of paths to database sources. Each database is a folder containing data -rag_vectorizer: semantic # possible values semantic, tfidf, openai +rag_vectorizer: semantic # possible values semantic, tfidf, openai, ollama +rag_service_url: "http://localhost:11434" # rag service url for ollama rag_vectorizer_model: "BAAI/bge-m3" # The model name if applicable rag_vectorizer_parameters: null # Parameters of the model in json format rag_chunk_size: 512 # number of tokens per chunk diff --git a/lollms/configs/config.yaml b/lollms/configs/config.yaml index 515df20..d10478b 100644 --- a/lollms/configs/config.yaml +++ b/lollms/configs/config.yaml @@ -1,5 +1,5 @@ # =================== Lord Of Large Language Multimodal Systems Configuration file =========================== -version: 144 +version: 145 # video viewing and news recovering last_viewed_video: null @@ -280,7 +280,8 @@ audio_silenceTimer: 5000 # Data vectorization rag_databases: [] # This is the list of paths to database sources. 
Each database is a folder containing data -rag_vectorizer: semantic # possible values semantic, tfidf, openai +rag_vectorizer: semantic # possible values semantic, tfidf, openai, ollama +rag_service_url: "http://localhost:11434" # rag service url for ollama rag_vectorizer_model: "BAAI/bge-m3" # The model name if applicable rag_vectorizer_parameters: null # Parameters of the model in json format rag_chunk_size: 512 # number of tokens per chunk diff --git a/lollms/databases/discussions_database.py b/lollms/databases/discussions_database.py index 8a1cd73..094f960 100644 --- a/lollms/databases/discussions_database.py +++ b/lollms/databases/discussions_database.py @@ -783,6 +783,9 @@ class Discussion: elif self.lollms.config.rag_vectorizer=="openai": from lollmsvectordb.lollms_vectorizers.openai_vectorizer import OpenAIVectorizer vectorizer = OpenAIVectorizer(self.lollms.config.rag_vectorizer_model, self.lollms.config.rag_vectorizer_openai_key) + elif self.lollms.config.rag_vectorizer == "ollama": + from lollmsvectordb.lollms_vectorizers.ollama_vectorizer import OllamaVectorizer + vectorizer = OllamaVectorizer(self.lollms.config.rag_vectorizer_model, self.lollms.config.rag_service_url) self.vectorizer = VectorDatabase( self.discussion_rag_folder/"db.sqli", diff --git a/lollms/databases/skills_database.py b/lollms/databases/skills_database.py index 3ab0f77..cd141c4 100644 --- a/lollms/databases/skills_database.py +++ b/lollms/databases/skills_database.py @@ -22,6 +22,10 @@ class SkillsLibrary: elif vectorizer == "openai": from lollmsvectordb.lollms_vectorizers.openai_vectorizer import OpenAIVectorizer v = OpenAIVectorizer() + elif vectorizer == "ollama": + from lollmsvectordb.lollms_vectorizers.ollama_vectorizer import OllamaVectorizer + v = OllamaVectorizer() + else: from lollmsvectordb.lollms_vectorizers.semantic_vectorizer import SemanticVectorizer v = SemanticVectorizer("BAAI/bge-m3") diff --git a/lollms/internet.py b/lollms/internet.py index 3babcd3..c9b40e3 100644
--- a/lollms/internet.py +++ b/lollms/internet.py @@ -347,6 +347,9 @@ def internet_search_with_vectorization(query, chromedriver_path=None, internet_n elif vectorizer == "openai": from lollmsvectordb.lollms_vectorizers.openai_vectorizer import OpenAIVectorizer v = OpenAIVectorizer() + elif vectorizer == "ollama": + from lollmsvectordb.lollms_vectorizers.ollama_vectorizer import OllamaVectorizer + v = OllamaVectorizer() vectorizer = VectorDatabase("", v, TikTokenTokenizer(), internet_vectorization_chunk_size, internet_vectorization_overlap_size) diff --git a/lollms/personality.py b/lollms/personality.py index f3dd587..b4d5456 100644 --- a/lollms/personality.py +++ b/lollms/personality.py @@ -1379,6 +1379,9 @@ Use this structure: elif self.config.rag_vectorizer == "openai": from lollmsvectordb.lollms_vectorizers.openai_vectorizer import OpenAIVectorizer v = OpenAIVectorizer(api_key=self.config.rag_vectorizer_openai_key) + elif self.config.rag_vectorizer == "ollama": + from lollmsvectordb.lollms_vectorizers.ollama_vectorizer import OllamaVectorizer + v = OllamaVectorizer(self.config.rag_vectorizer_model, self.config.rag_service_url) self.persona_data_vectorizer = VectorDatabase(self.database_path, v, TikTokenTokenizer(), self.config.rag_chunk_size, self.config.rag_overlap) @@ -1543,7 +1546,10 @@ Use this structure: self.ShowBlockingMessage("Processing file\nPlease wait ...\nUsing open ai vectorizer") from lollmsvectordb.lollms_vectorizers.openai_vectorizer import OpenAIVectorizer v = OpenAIVectorizer() - + elif self.config.rag_vectorizer == "ollama": + self.ShowBlockingMessage("Processing file\nPlease wait ...\nUsing ollama vectorizer") + from lollmsvectordb.lollms_vectorizers.ollama_vectorizer import OllamaVectorizer + v = OllamaVectorizer(self.config.rag_vectorizer_model, self.config.rag_service_url) self.vectorizer = VectorDatabase( client.discussion.discussion_rag_folder/"db.sqli", v, @@ -3741,6 +3747,9 @@ transition-all duration-300 ease-in-out"> elif vectorizer 
== "openai": from lollmsvectordb.lollms_vectorizers.openai_vectorizer import OpenAIVectorizer v = OpenAIVectorizer() + elif self.config.rag_vectorizer == "ollama": + from lollmsvectordb.lollms_vectorizers.ollama_vectorizer import OllamaVectorizer + v = OllamaVectorizer(self.config.rag_vectorizer_model, self.config.rag_service_url) vectorizer = VectorDatabase("", v, TikTokenTokenizer(), self.config.rag_chunk_size, self.config.rag_overlap) vectorizer.add_document(title, text, url) diff --git a/lollms/server/endpoints/lollms_file_system.py b/lollms/server/endpoints/lollms_file_system.py index 9468d67..e225c70 100644 --- a/lollms/server/endpoints/lollms_file_system.py +++ b/lollms/server/endpoints/lollms_file_system.py @@ -142,6 +142,9 @@ def select_rag_database(client) -> Optional[Dict[str, Path]]: elif lollmsElfServer.config.rag_vectorizer == "openai": from lollmsvectordb.lollms_vectorizers.openai_vectorizer import OpenAIVectorizer v = OpenAIVectorizer(lollmsElfServer.config.rag_vectorizer_openai_key) + elif lollmsElfServer.config.rag_vectorizer == "ollama": + from lollmsvectordb.lollms_vectorizers.ollama_vectorizer import OllamaVectorizer + v = OllamaVectorizer(lollmsElfServer.config.rag_vectorizer_model, lollmsElfServer.config.rag_service_url) vdb = VectorDatabase(Path(folder_path)/f"{db_name}.sqlite", v, lollmsElfServer.model if lollmsElfServer.model else TikTokenTokenizer()) # Get all files in the folder @@ -277,6 +280,9 @@ def toggle_mount_rag_database(database_infos: MountDatabase): elif lollmsElfServer.config.rag_vectorizer == "openai": from lollmsvectordb.lollms_vectorizers.openai_vectorizer import OpenAIVectorizer v = OpenAIVectorizer(lollmsElfServer.config.rag_vectorizer_openai_key) + elif lollmsElfServer.config.rag_vectorizer == "ollama": + from lollmsvectordb.lollms_vectorizers.ollama_vectorizer import OllamaVectorizer + v = OllamaVectorizer(lollmsElfServer.config.rag_vectorizer_model, lollmsElfServer.config.rag_service_url) vdb = 
VectorDatabase(Path(path)/f"{database_infos.database_name}.sqlite", v, lollmsElfServer.model if lollmsElfServer.model else TikTokenTokenizer(), chunk_size=lollmsElfServer.config.rag_chunk_size, clean_chunks=lollmsElfServer.config.rag_clean_chunks, n_neighbors=lollmsElfServer.config.rag_n_chunks) @@ -344,6 +350,9 @@ async def vectorize_folder(database_infos: FolderInfos): elif lollmsElfServer.config.rag_vectorizer == "openai": from lollmsvectordb.lollms_vectorizers.openai_vectorizer import OpenAIVectorizer v = OpenAIVectorizer(lollmsElfServer.config.rag_vectorizer_openai_key) + elif lollmsElfServer.config.rag_vectorizer == "ollama": + from lollmsvectordb.lollms_vectorizers.ollama_vectorizer import OllamaVectorizer + v = OllamaVectorizer(lollmsElfServer.config.rag_vectorizer_model, lollmsElfServer.config.rag_service_url) vector_db_path = Path(folder_path)/f"{db_name}.sqlite" diff --git a/lollms/server/endpoints/lollms_rag.py b/lollms/server/endpoints/lollms_rag.py index b39b8bb..c525b75 100644 --- a/lollms/server/endpoints/lollms_rag.py +++ b/lollms/server/endpoints/lollms_rag.py @@ -75,6 +75,9 @@ def get_user_vectorizer(user_key: str): elif lollmsElfServer.config.rag_vectorizer == "openai": from lollmsvectordb.lollms_vectorizers.openai_vectorizer import OpenAIVectorizer v = OpenAIVectorizer(lollmsElfServer.config.rag_vectorizer_openai_key) + elif lollmsElfServer.config.rag_vectorizer == "ollama": + from lollmsvectordb.lollms_vectorizers.ollama_vectorizer import OllamaVectorizer + v = OllamaVectorizer(lollmsElfServer.config.rag_vectorizer_model, lollmsElfServer.config.rag_service_url) return VectorDatabase( "",