diff --git a/configs/config.yaml b/configs/config.yaml index 4ce9456..b57c1a9 100644 --- a/configs/config.yaml +++ b/configs/config.yaml @@ -1,5 +1,5 @@ -# =================== Lord Of Large Language Models Configuration file =========================== -version: 46 +# =================== Lord Of Large Language Multimodal Systems Configuration file =========================== +version: 49 binding_name: null model_name: null @@ -79,6 +79,16 @@ petals_base_url: http://0.0.0.0:8010 enable_lollms_service: false lollms_base_url: http://0.0.0.0:1234 +# elastic search service +elastic_search_service: false +elastic_search_url: http://0.0.0.0:9606 + +# vllm service +vllm_service: false +vllm_url: http://0.0.0.0:8000 +vllm_model_path: mistralai/Mistral-7B-v0.1 + + # Audio media_on: false audio_in_language: 'en-US' @@ -114,3 +124,8 @@ positive_boost: null negative_boost: null force_output_language_to_be: null fun_mode: False + + +# webui configurations +show_code_of_conduct: true +activate_audio_infos: true diff --git a/lollms/configs/config.yaml b/lollms/configs/config.yaml index e0a699a..b57c1a9 100644 --- a/lollms/configs/config.yaml +++ b/lollms/configs/config.yaml @@ -1,5 +1,5 @@ -# =================== Lord Of Large Language Models Configuration file =========================== -version: 48 +# =================== Lord Of Large Language Multimodal Systems Configuration file =========================== +version: 49 binding_name: null model_name: null @@ -124,3 +124,8 @@ positive_boost: null negative_boost: null force_output_language_to_be: null fun_mode: False + + +# webui configurations +show_code_of_conduct: true +activate_audio_infos: true diff --git a/lollms/services/ollama/lollms_ollama.py b/lollms/services/ollama/lollms_ollama.py index 3f9606e..dba52f3 100644 --- a/lollms/services/ollama/lollms_ollama.py +++ b/lollms/services/ollama/lollms_ollama.py @@ -64,7 +64,8 @@ class Service: self, app:LollmsApplication, base_url="http://127.0.0.1:11434", - wait_max_retries = 5 
+ wait_max_retries = 5, + wait_for_service=True ): self.base_url = base_url # Get the current directory @@ -91,7 +92,10 @@ class Service: subprocess.Popen(['bash', f'{Path.home()}/run_ollama.sh']) # Wait until the service is available at http://127.0.0.1:7860/ - self.wait_for_service(max_retries=wait_max_retries) + if wait_for_service: + self.wait_for_service(max_retries=wait_max_retries) + else: + ASCIIColors.warning("We are not waiting for the OLLAMA service to be up.\nThis means that you may need to wait a bit before you can use it.") def wait_for_service(self, max_retries = 150, show_warning=True): url = f"{self.base_url}" diff --git a/lollms/services/sd/lollms_sd.py b/lollms/services/sd/lollms_sd.py index 3fc7465..ea19eff 100644 --- a/lollms/services/sd/lollms_sd.py +++ b/lollms/services/sd/lollms_sd.py @@ -204,7 +204,8 @@ class LollmsSD: username=None, password=None, auto_sd_base_url=None, - share=False + share=False, + wait_for_service=True ): if auto_sd_base_url=="" or auto_sd_base_url=="http://127.0.0.1:7860": auto_sd_base_url = None @@ -264,7 +265,10 @@ class LollmsSD: ASCIIColors.success("Launching Auto1111's SD succeeded") # Wait until the service is available at http://127.0.0.1:7860/ - self.wait_for_service(max_retries=max_retries) + if wait_for_service: + self.wait_for_service(max_retries=max_retries) + else: + ASCIIColors.warning("We are not waiting for the SD service to be up.\nThis means that you may need to wait a bit before you can use it.") self.default_sampler = sampler self.default_steps = steps