From 5a83d9c8905705c1ba777c0ee4a5b22aebe7644e Mon Sep 17 00:00:00 2001
From: Saifeddine ALOUI
Date: Sat, 20 May 2023 13:16:01 +0200
Subject: [PATCH] added backend installation for linux

---
 gpt4all_api/api.py                   | 4 ++--
 installations/install_backend_gpu.sh | 7 +++++++
 2 files changed, 9 insertions(+), 2 deletions(-)
 create mode 100644 installations/install_backend_gpu.sh

diff --git a/gpt4all_api/api.py b/gpt4all_api/api.py
index 141a3c2b..255259a1 100644
--- a/gpt4all_api/api.py
+++ b/gpt4all_api/api.py
@@ -220,8 +220,8 @@ class ModelProcess:
             self._set_config_result['personality_status'] ='failed'
             self._set_config_result['errors'].append(f"couldn't load personality:{ex}")
 
-    def step_callback(self, text, type):
-        print(f"Step callback : {text}")
+    def step_callback(self, text, message_type):
+        self.generation_queue.put((text, self.id, message_type))
     def _run(self):
         self._rebuild_model()
         self._rebuild_personality()
diff --git a/installations/install_backend_gpu.sh b/installations/install_backend_gpu.sh
new file mode 100644
index 00000000..cd4f35e4
--- /dev/null
+++ b/installations/install_backend_gpu.sh
@@ -0,0 +1,7 @@
+echo "This will recompile llama-cpp-python to use your hardware with GPU support enabled."
+# First, remove any previous installation and purge the pip cache
+pip uninstall llama-cpp-python -y
+pip cache purge
+export CMAKE_ARGS="-DLLAMA_CUBLAS=on"
+export FORCE_CMAKE=1
+pip install llama-cpp-python --upgrade
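
Note on the step_callback change: instead of printing each step, the model
process now pushes (text, self.id, message_type) onto generation_queue so
the receiving side can relay it. A minimal sketch of a consuming loop,
assuming a multiprocessing.Queue on the receiving end; pump_generation_queue
and handle_message are illustrative names, not part of this patch:

    # Hypothetical consumer loop for generation_queue.
    # handle_message is an illustrative callback, not an API of this repo.
    import queue

    def pump_generation_queue(generation_queue, handle_message):
        while True:
            try:
                # Matches the producer tuple: (text, sender_id, message_type)
                text, sender_id, message_type = generation_queue.get(timeout=1)
            except queue.Empty:
                continue
            handle_message(text, sender_id, message_type)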
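Note on the install script: CMAKE_ARGS="-DLLAMA_CUBLAS=on" together with
FORCE_CMAKE=1 makes pip compile llama-cpp-python from source against cuBLAS
instead of installing a prebuilt CPU wheel, so a working CUDA toolkit and
C/C++ compiler are assumed to be present. Run it from the repository root
with "bash installations/install_backend_gpu.sh".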