Mirror of https://github.com/ParisNeo/lollms-webui.git (synced 2024-12-18 20:17:50 +00:00)

commit 457216c66e: upgraded
parent 7e49f81a5c
@@ -7,26 +7,26 @@
   server: https://huggingface.co/TheBloke/OpenAssistant-SFT-7-Llama-30B-GGML/resolve/main/
   sha256: 32fd44c685fbf429810db593e2db8aa42a7e1be2cd3571b6005d53b029acfcf5
 - bestLlama: 'true'
-  description: The wizardVicuna model 13B
-  filename: wizard-vicuna-13B.ggml.q4_0.bin
+  description: Legacy version of Vicuna 7B v 1.1 Quantized on 4 bits
+  filename: legacy-ggml-vicuna-7B-1.1-q4_0.bin
   license: Non commercial
-  owner_link: https://huggingface.co/TheBloke
-  owner: TheBloke
-  server: https://huggingface.co/TheBloke/wizard-vicuna-13B-GGML/resolve/main/
-  sha256: 32fd44c685fbf429810db593e2db8aa42a7e1be2cd3571b6005d53b029acfcf5
+  owner_link: https://huggingface.co/CRD716
+  owner: CRD716
+  server: https://huggingface.co/CRD716/ggml-vicuna-1.1-quantized/resolve/main/
+  sha256: 67efec973a81151a55e55f8e747b455354979492978b2f9f22a342c6d841e6b7
 - bestLlama: 'true'
-  description: The wizardLM model 7B uncensored
-  filename: WizardLM-7B-uncensored.ggml.q4_0.bin
+  description: Manticore 13B quantized on 4 bits
+  filename: ggml-manticore-13B-q4_0.bin
   license: Non commercial
-  owner_link: https://huggingface.co/TheBloke
-  owner: TheBloke
-  server: https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGML/resolve/main/
-  sha256: b1e53a3c3a9389b9c5d81e0813cfb90ebaff6acad1733fad08cd28974fa3ac30
+  owner_link: https://huggingface.co/CRD716
+  owner: CRD716
+  server: https://huggingface.co/CRD716/manticore-13b/resolve/main/
+  sha256: 910f3e73dc5797753313a950989c54a30342780311d64c3d4b8a37b12dd50336
 - bestLlama: 'true'
-  description: The wizardLM model 7B uncensored
-  filename: Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_0.bin
+  description: Original weights of GPT4ALL
+  filename: gpt4all-lora-quantized-ggml.new.bin
   license: Non commercial
-  owner_link: https://huggingface.co/TheBloke
-  owner: TheBloke
-  server: https://huggingface.co/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/resolve/main/
-  sha256: c31a4edd96527dcd808bcf9b99e3894065ac950747dac84ecd415a2387454e7c
+  owner_link: https://huggingface.co/ParisNeo
+  owner: ParisNeo
+  server: https://huggingface.co/ParisNeo/GPT4All/resolve/main/
+  sha256: da588cda4bd870b8f25ee239910de7f85a82bf25d58d7ad0b10965e877f8a1cd
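Every entry in the model-zoo hunk above follows the same schema: description, filename, license, owner, owner_link, server (base download URL) and sha256 (expected digest of the file at server + filename). Below is a minimal sketch of how one such entry could be fetched and checked against its sha256. The yaml path, the helper name and the use of requests/PyYAML are illustrative assumptions, not the project's actual downloader.

# Hypothetical helper (not part of the repository): fetch one model-zoo entry
# and verify it against the sha256 recorded in the yaml hunk above.
import hashlib
from pathlib import Path

import requests
import yaml


def download_and_verify(entry: dict, dest_dir: Path, chunk_size: int = 1 << 20) -> Path:
    """Stream server + filename to disk and compare the sha256 digest."""
    url = entry["server"].rstrip("/") + "/" + entry["filename"]
    dest_dir.mkdir(parents=True, exist_ok=True)
    dest = dest_dir / entry["filename"]

    digest = hashlib.sha256()
    with requests.get(url, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        with open(dest, "wb") as f:
            for chunk in resp.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                digest.update(chunk)

    if digest.hexdigest() != entry["sha256"]:
        dest.unlink(missing_ok=True)
        raise ValueError(f"sha256 mismatch for {entry['filename']}")
    return dest


if __name__ == "__main__":
    # "models.yaml" stands in for whichever backend zoo file this hunk belongs to.
    entries = yaml.safe_load(Path("models.yaml").read_text())
    download_and_verify(entries[0], Path("./models"))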
@@ -237,7 +237,8 @@ class ModelProcess:
             self._set_config_result['errors'].append(f"couldn't load personality:{ex}")
 
     def step_callback(self, text, message_type):
-        self.generation_queue.put((text,self.id, message_type))
+        if message_type==0:
+            self.generation_queue.put((text,self.id, message_type))
 
     def _run(self):
         self._rebuild_model()
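The hunk above narrows step_callback so that only callbacks with message_type 0 are pushed onto the generation queue; previously every callback was forwarded. The following self-contained sketch shows that producer/consumer pattern. Only the filtering in step_callback mirrors the diff; the class name, the consumer and the meaning of type 0 are assumptions, and a plain queue.Queue stands in for the cross-process queue ModelProcess appears to use.

# Illustrative sketch only; not code from the repository.
import queue


class GenerationWorker:
    """Stand-in for the generation worker; single-process for simplicity."""

    def __init__(self, worker_id: int):
        self.id = worker_id
        self.generation_queue = queue.Queue()

    def step_callback(self, text: str, message_type: int) -> None:
        # As in the patched method: forward only type-0 messages.
        if message_type == 0:
            self.generation_queue.put((text, self.id, message_type))


def drain(q: queue.Queue) -> None:
    # Consumer side: read (text, id, message_type) tuples until the queue is empty.
    while not q.empty():
        text, worker_id, message_type = q.get()
        print(f"worker {worker_id} [{message_type}]: {text}")


if __name__ == "__main__":
    worker = GenerationWorker(worker_id=1)
    worker.step_callback("Hello", message_type=0)          # forwarded
    worker.step_callback("status update", message_type=1)  # filtered out
    drain(worker.generation_queue)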
@@ -1,3 +1,4 @@
tqdm
flask
flask_socketio
pytest
@@ -1,3 +1,4 @@
tqdm
flask
flask_socketio
nomic