diff --git a/configs/config.yaml b/configs/config.yaml
index 64f5d54..482d756 100644
--- a/configs/config.yaml
+++ b/configs/config.yaml
@@ -1,5 +1,5 @@
 # =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
-version: 67
+version: 68
 binding_name: null
 model_name: null
 
@@ -74,18 +74,18 @@ copy_to_clipboard_add_all_details: false
 
 # Voice service
 enable_voice_service: false
-xtts_base_url: http://127.0.0.1:8020
+xtts_base_url: http://localhost:8020
 auto_read: false
 current_voice: null
 current_language: en
 
 # Image generation service
 enable_sd_service: false
-sd_base_url: http://127.0.0.1:7860
+sd_base_url: http://localhost:7860
 
 # ollama service
 enable_ollama_service: false
-ollama_base_url: http://0.0.0.0:11434
+ollama_base_url: http://localhost:11434
 
 # petals service
 enable_petals_service: false
@@ -102,7 +102,7 @@ elastic_search_service: false
 elastic_search_url: http://localhost:9200
 
 # vll service
-vllm_service: false
+enable_vllm_service: false
 vllm_url: http://localhost:8000
 vllm_model_path: TinyLlama/TinyLlama-1.1B-Chat-v1.0
 
diff --git a/lollms/app.py b/lollms/app.py
index f73278b..6a85f43 100644
--- a/lollms/app.py
+++ b/lollms/app.py
@@ -134,13 +134,13 @@ class LollmsApplication(LoLLMsCom):
                 trace_exception(ex)
                 self.warning(f"Couldn't load Ollama")
 
-        if self.config.vllm_service:
+        if self.config.enable_vllm_service:
             try:
                 from lollms.services.vllm.lollms_vllm import Service
                 self.vllm = Service(self, base_url=self.config.vllm_url)
             except Exception as ex:
                 trace_exception(ex)
-                self.warning(f"Couldn't load Ollama")
+                self.warning(f"Couldn't load vllm")
 
 
         if self.config.enable_voice_service:
diff --git a/lollms/configs/config.yaml b/lollms/configs/config.yaml
index 56b3385..482d756 100644
--- a/lollms/configs/config.yaml
+++ b/lollms/configs/config.yaml
@@ -1,5 +1,5 @@
 # =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
-version: 66
+version: 68
 binding_name: null
 model_name: null
 
@@ -74,35 +74,37 @@ copy_to_clipboard_add_all_details: false
 
 # Voice service
 enable_voice_service: false
-xtts_base_url: http://127.0.0.1:8020
+xtts_base_url: http://localhost:8020
 auto_read: false
 current_voice: null
 current_language: en
 
 # Image generation service
 enable_sd_service: false
-sd_base_url: http://127.0.0.1:7860
+sd_base_url: http://localhost:7860
 
 # ollama service
 enable_ollama_service: false
-ollama_base_url: http://0.0.0.0:11434
+ollama_base_url: http://localhost:11434
 
 # petals service
 enable_petals_service: false
-petals_base_url: http://0.0.0.0:8010
+petals_base_url: http://localhost:8064
+petals_model_path: TinyLlama/TinyLlama-1.1B-Chat-v1.0
+petals_device: cuda0
 
 # lollms service
 enable_lollms_service: false
-lollms_base_url: http://0.0.0.0:1234
+lollms_base_url: http://localhost:1234
 
 # elastic search service
 elastic_search_service: false
-elastic_search_url: http://0.0.0.0:9200
+elastic_search_url: http://localhost:9200
 
 # vll service
-vllm_service: false
-vllm_url: http://0.0.0.0:8000
-vllm_model_path: mistralai/Mistral-7B-v0.1
+enable_vllm_service: false
+vllm_url: http://localhost:8000
+vllm_model_path: TinyLlama/TinyLlama-1.1B-Chat-v1.0
 
 
 # Audio
diff --git a/lollms/server/configs/config.yaml b/lollms/server/configs/config.yaml
index 64f5d54..482d756 100644
--- a/lollms/server/configs/config.yaml
+++ b/lollms/server/configs/config.yaml
@@ -1,5 +1,5 @@
 # =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
-version: 67
+version: 68
 binding_name: null
 model_name: null
 
@@ -74,18 +74,18 @@ copy_to_clipboard_add_all_details: false
 
 # Voice service
 enable_voice_service: false
-xtts_base_url: http://127.0.0.1:8020
+xtts_base_url: http://localhost:8020
 auto_read: false
 current_voice: null
 current_language: en
 
 # Image generation service
 enable_sd_service: false
-sd_base_url: http://127.0.0.1:7860
+sd_base_url: http://localhost:7860
 
 # ollama service
 enable_ollama_service: false
-ollama_base_url: http://0.0.0.0:11434
+ollama_base_url: http://localhost:11434
 
 # petals service
 enable_petals_service: false
@@ -102,7 +102,7 @@ elastic_search_service: false
 elastic_search_url: http://localhost:9200
 
 # vll service
-vllm_service: false
+enable_vllm_service: false
 vllm_url: http://localhost:8000
 vllm_model_path: TinyLlama/TinyLlama-1.1B-Chat-v1.0
 
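Note on the key rename: this patch renames `vllm_service` to `enable_vllm_service` (matching the `enable_*` naming of the other service flags) and bumps the config version from 66/67 to 68, so configs saved under the old schema still carry the old key. A minimal loader-side migration sketch, assuming PyYAML; the `load_config` helper is hypothetical and not part of this diff:

    import yaml

    def load_config(path: str) -> dict:
        """Load a config.yaml, renaming the pre-v68 vllm key if present."""
        with open(path, "r", encoding="utf-8") as f:
            cfg = yaml.safe_load(f) or {}
        # v68 renamed `vllm_service` to `enable_vllm_service`; carry the old
        # value forward so an enabled service is not silently turned off.
        if "vllm_service" in cfg and "enable_vllm_service" not in cfg:
            cfg["enable_vllm_service"] = cfg.pop("vllm_service")
        return cfg

With something like this in place, an old config containing `vllm_service: true` still reaches the renamed check in `LollmsApplication` (`if self.config.enable_vllm_service:`) with its intended value.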