diff --git a/app.py b/app.py
index ed2bd03f..413ca49f 100644
--- a/app.py
+++ b/app.py
@@ -111,6 +111,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
         self.add_endpoint("/", "", self.index, methods=["GET"])
         self.add_endpoint("/<path:filename>", "serve_static", self.serve_static, methods=["GET"])
         self.add_endpoint("/personalities/<path:filename>", "serve_personalities", self.serve_personalities, methods=["GET"])
+        self.add_endpoint("/outputs/<path:filename>", "serve_outputs", self.serve_outputs, methods=["GET"])
 
         self.add_endpoint("/export_discussion", "export_discussion", self.export_discussion, methods=["GET"])
 
@@ -460,6 +461,13 @@ class Gpt4AllWebUI(GPT4AllAPI):
         fn = filename.split("/")[-1]
         return send_from_directory(path, fn)
 
+    def serve_outputs(self, filename):
+        root_dir = os.getcwd()
+        path = os.path.join(root_dir, 'outputs/')+"/".join(filename.split("/")[:-1])
+
+        fn = filename.split("/")[-1]
+        return send_from_directory(path, fn)
+
     def export(self):
         return jsonify(self.db.export_to_json())
 
diff --git a/backends/llama_cpp_official/models.yaml b/backends/llama_cpp_official/models.yaml
index d09cd346..a7ffa1d3 100644
--- a/backends/llama_cpp_official/models.yaml
+++ b/backends/llama_cpp_official/models.yaml
@@ -22,4 +22,11 @@
   owner: TheBloke
   server: https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGML/resolve/main/
   sha256: b1e53a3c3a9389b9c5d81e0813cfb90ebaff6acad1733fad08cd28974fa3ac30
-
+- bestLlama: 'true'
+  description: The Wizard-Vicuna 7B uncensored model
+  filename: Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_0.bin
+  license: Non commercial
+  link: https://huggingface.co/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/resolve/main/
+  owner: TheBloke
+  server: https://huggingface.co/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/resolve/main/
+  sha256: c31a4edd96527dcd808bcf9b99e3894065ac950747dac84ecd415a2387454e7c
\ No newline at end of file
diff --git a/configs/default.yaml b/configs/default.yaml
index b1db8119..3672b4a0 100644
--- a/configs/default.yaml
+++ b/configs/default.yaml
@@ -28,4 +28,3 @@ auto_read: false
 use_avx2: true # By default we require using avx2 but if not supported, make sure you remove it from here
 use_new_ui: false # By default use old ui
 override_personality_model_parameters: false #if true the personality parameters are overriden by those of the configuration (may affect personality behaviour)
-debug: false
\ No newline at end of file
diff --git a/gpt4all_api/api.py b/gpt4all_api/api.py
index f80ff793..99ffbbb3 100644
--- a/gpt4all_api/api.py
+++ b/gpt4all_api/api.py
@@ -163,6 +163,7 @@ class ModelProcess:
 
     def rebuild_backend(self, config):
         try:
+            print(" ******************* Building Backend from main Process *************************")
             backend = self.load_backend(config["backend"])
             print("Backend loaded successfully")
         except Exception as ex:
@@ -175,7 +176,7 @@ class ModelProcess:
 
     def _rebuild_model(self):
         try:
-            print("Rebuilding model")
+            print(" ******************* Building Backend from generation Process *************************")
             self.backend = self.load_backend(self.config["backend"])
             print("Backend loaded successfully")
             try:
@@ -198,8 +199,10 @@ class ModelProcess:
 
     def rebuild_personality(self):
         try:
+            print(" ******************* Building Personality from main Process *************************")
             personality_path = f"personalities/{self.config['personality_language']}/{self.config['personality_category']}/{self.config['personality']}"
-            personality = AIPersonality(personality_path)
+            personality = AIPersonality(personality_path, run_scripts=False)
+            print(f" ************ Personality {personality.name} is ready (Main process) ***************************")
         except Exception as ex:
             print(f"Personality file not found or is corrupted ({personality_path}).\nPlease verify that the personality you have selected exists or select another personality. Some updates may lead to change in personality name or category, so check the personality selection in settings to be sure.")
             if self.config["debug"]:
@@ -210,10 +213,14 @@ class ModelProcess:
 
     def _rebuild_personality(self):
         try:
+            print(" ******************* Building Personality from generation Process *************************")
             personality_path = f"personalities/{self.config['personality_language']}/{self.config['personality_category']}/{self.config['personality']}"
             self.personality = AIPersonality(personality_path)
+            print(f" ************ Personality {self.personality.name} is ready (generation process) ***************************")
         except Exception as ex:
-            print(f"Personality file not found or is corrupted ({personality_path}).\nPlease verify that the personality you have selected exists or select another personality. Some updates may lead to change in personality name or category, so check the personality selection in settings to be sure.")
+            print(f"Personality file not found or is corrupted ({personality_path}).")
+            print(f"Please verify that the personality you have selected exists or select another personality. Some updates may lead to change in personality name or category, so check the personality selection in settings to be sure.")
+            print(f"Exception: {ex}")
             if self.config["debug"]:
                 print(ex)
             self.personality = AIPersonality()
@@ -256,6 +263,7 @@ class ModelProcess:
         if self.personality.processor_cfg is not None:
             if "custom_workflow" in self.personality.processor_cfg:
                 if self.personality.processor_cfg["custom_workflow"]:
+                    print("Running workflow")
                     output = self.personality.processor.run_workflow(self._generate, command[1], command[0], self.step_callback)
                     self._callback(output)
                     self.is_generating.value = 0
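
Below is a minimal standalone sketch of the new /outputs/ route added to app.py above, assuming a bare Flask app and an outputs/ folder in the working directory; the app object and the example URL (including the port) are illustrative assumptions, since the real handler lives on Gpt4AllWebUI and is registered through add_endpoint.

# Minimal sketch of the /outputs/ file-serving route (assumes Flask is installed
# and an outputs/ directory exists in the current working directory).
import os
from flask import Flask, send_from_directory

app = Flask(__name__)

@app.route("/outputs/<path:filename>")
def serve_outputs(filename):
    root_dir = os.getcwd()
    # Everything before the last "/" is treated as a sub-folder of outputs/;
    # the final segment is the file name handed to send_from_directory.
    path = os.path.join(root_dir, "outputs/") + "/".join(filename.split("/")[:-1])
    fn = filename.split("/")[-1]
    return send_from_directory(path, fn)

# Hypothetical example request once the server is running:
#   curl http://localhost:9600/outputs/images/result.png -o result.png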