added backend installation for linux

commit 5a83d9c890 (parent 210d9d2715)
@@ -220,8 +220,8 @@ class ModelProcess:
                 self._set_config_result['personality_status'] ='failed'
                 self._set_config_result['errors'].append(f"couldn't load personality:{ex}")
 
-    def step_callback(self, text, type):
-        print(f"Step callback : {text}")
+    def step_callback(self, text, message_type):
+        self.generation_queue.put((text,self.id, message_type))
     def _run(self):
         self._rebuild_model()
         self._rebuild_personality()
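The hunk above replaces a step_callback that merely printed each step with one that pushes (text, id, message_type) tuples onto generation_queue, so progress reports flow from the generation process to whichever thread drains the queue instead of stopping at stdout. A minimal sketch of such a consumer, assuming only the queue and the tuple shape visible in the diff (the function name and the print stand-in are hypothetical):

import queue

def drain_generation_queue(generation_queue, timeout=0.1):
    # Pull (text, id, message_type) tuples pushed by step_callback
    # until the queue stays empty for `timeout` seconds.
    while True:
        try:
            text, sender_id, message_type = generation_queue.get(timeout=timeout)
        except queue.Empty:
            break
        # Forward each chunk to the UI layer; a plain print stands in here.
        print(f"[{message_type}] from {sender_id}: {text}")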
installations/install_backend_gpu.sh (new file, 7 lines)
@@ -0,0 +1,7 @@
+echo "This will recompile llama-cpp-python to use your hardware with GPU enabled."
+pip uninstall llama-cpp-python -y
+# First we need to purge any old installation
+pip cache purge
+export CMAKE_ARGS="-DLLAMA_CUBLAS=on"
+export FORCE_CMAKE=1
+pip install llama-cpp-python --upgrade
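The two exports are what turn the reinstall into a GPU build: CMAKE_ARGS="-DLLAMA_CUBLAS=on" tells llama-cpp-python's CMake build to compile llama.cpp with cuBLAS kernels, and FORCE_CMAKE=1 forces a build from source rather than reuse of a prebuilt CPU wheel; the earlier uninstall and pip cache purge make sure a cached CPU wheel cannot short-circuit that. The script assumes a working CUDA toolchain (nvcc) is already on PATH and is run from the repository root, e.g. with bash installations/install_backend_gpu.sh. A quick post-install sanity check from Python (a sketch; llama_supports_gpu_offload only exists in newer llama-cpp-python releases, hence the guard):

import llama_cpp

# Confirm the rebuilt wheel imports and report its version.
print("llama-cpp-python version:", llama_cpp.__version__)

# Newer releases expose a direct capability probe; guard it since
# older builds may not ship the symbol.
if hasattr(llama_cpp, "llama_supports_gpu_offload"):
    print("GPU offload supported:", llama_cpp.llama_supports_gpu_offload())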