From 457216c66e98ebc975f955aa69ce6bc6349b2427 Mon Sep 17 00:00:00 2001
From: saloui
Date: Mon, 22 May 2023 00:52:26 +0200
Subject: [PATCH] Upgrade default model list, filter callbacks, add tqdm

Replace the llama_cpp_official model catalog with Vicuna 7B v1.1,
Manticore 13B, and the original GPT4All weights; forward only
message_type == 0 events from step_callback to the generation queue;
add tqdm to both the runtime and dev requirements.
---
 backends/llama_cpp_official/models.yaml | 36 ++++++++++++-------------
 gpt4all_api/api.py                      |  3 ++-
 requirements.txt                        |  1 +
 requirements_dev.txt                    |  1 +
 4 files changed, 22 insertions(+), 19 deletions(-)

diff --git a/backends/llama_cpp_official/models.yaml b/backends/llama_cpp_official/models.yaml
index c0208b1e..7cb0caa6 100644
--- a/backends/llama_cpp_official/models.yaml
+++ b/backends/llama_cpp_official/models.yaml
@@ -7,26 +7,26 @@
   server: https://huggingface.co/TheBloke/OpenAssistant-SFT-7-Llama-30B-GGML/resolve/main/
   sha256: 32fd44c685fbf429810db593e2db8aa42a7e1be2cd3571b6005d53b029acfcf5
 - bestLlama: 'true'
-  description: The wizardVicuna model 13B
-  filename: wizard-vicuna-13B.ggml.q4_0.bin
+  description: Legacy version of Vicuna 7B v1.1, quantized to 4 bits
+  filename: legacy-ggml-vicuna-7B-1.1-q4_0.bin
   license: Non commercial
-  owner_link: https://huggingface.co/TheBloke
-  owner: TheBloke
-  server: https://huggingface.co/TheBloke/wizard-vicuna-13B-GGML/resolve/main/
-  sha256: 32fd44c685fbf429810db593e2db8aa42a7e1be2cd3571b6005d53b029acfcf5
+  owner_link: https://huggingface.co/CRD716
+  owner: CRD716
+  server: https://huggingface.co/CRD716/ggml-vicuna-1.1-quantized/resolve/main/
+  sha256: 67efec973a81151a55e55f8e747b455354979492978b2f9f22a342c6d841e6b7
 - bestLlama: 'true'
-  description: The wizardLM model 7B uncensored
-  filename: WizardLM-7B-uncensored.ggml.q4_0.bin
+  description: Manticore 13B, quantized to 4 bits
+  filename: ggml-manticore-13B-q4_0.bin
   license: Non commercial
-  owner_link: https://huggingface.co/TheBloke
-  owner: TheBloke
-  server: https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGML/resolve/main/
-  sha256: b1e53a3c3a9389b9c5d81e0813cfb90ebaff6acad1733fad08cd28974fa3ac30
+  owner_link: https://huggingface.co/CRD716
+  owner: CRD716
+  server: https://huggingface.co/CRD716/manticore-13b/resolve/main/
+  sha256: 910f3e73dc5797753313a950989c54a30342780311d64c3d4b8a37b12dd50336
 - bestLlama: 'true'
-  description: The wizardLM model 7B uncensored
-  filename: Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_0.bin
+  description: Original weights of GPT4All
+  filename: gpt4all-lora-quantized-ggml.new.bin
   license: Non commercial
-  owner_link: https://huggingface.co/TheBloke
-  owner: TheBloke
-  server: https://huggingface.co/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/resolve/main/
-  sha256: c31a4edd96527dcd808bcf9b99e3894065ac950747dac84ecd415a2387454e7c
\ No newline at end of file
+  owner_link: https://huggingface.co/ParisNeo
+  owner: ParisNeo
+  server: https://huggingface.co/ParisNeo/GPT4All/resolve/main/
+  sha256: da588cda4bd870b8f25ee239910de7f85a82bf25d58d7ad0b10965e877f8a1cd
\ No newline at end of file
diff --git a/gpt4all_api/api.py b/gpt4all_api/api.py
index be9f0841..d5885e00 100644
--- a/gpt4all_api/api.py
+++ b/gpt4all_api/api.py
@@ -237,7 +237,8 @@ class ModelProcess:
             self._set_config_result['errors'].append(f"couldn't load personality:{ex}")
 
     def step_callback(self, text, message_type):
-        self.generation_queue.put((text,self.id, message_type))
+        if message_type == 0:
+            self.generation_queue.put((text, self.id, message_type))
 
     def _run(self):
         self._rebuild_model()
diff --git a/requirements.txt b/requirements.txt
index 729799e2..72f433a8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,4 @@
+tqdm
 flask
 flask_socketio
 pytest
diff --git a/requirements_dev.txt b/requirements_dev.txt
index 2274b692..4cf6e435 100644
--- a/requirements_dev.txt
+++ b/requirements_dev.txt
@@ -1,3 +1,4 @@
+tqdm
 flask
 flask_socketio
 nomic