added gpu tool
commit 7b09dd1c01 (parent 7babe3d311)
@@ -38,7 +38,9 @@ class LLAMACPP(GPTBackend):
         if seed <=0:
             seed = random.randint(1, 2**31)
-        self.model = Llama(model_path=f"./models/llama_cpp_official/{self.config['model']}", n_ctx=self.config["ctx_size"], n_gpu_layers=40, seed=seed)
+        if not "n_gpu_layers" in self.config:
+            self.config["n_gpu_layers"] = 40
+        self.model = Llama(model_path=f"./models/llama_cpp_official/{self.config['model']}", n_ctx=self.config["ctx_size"], n_gpu_layers=self.config["n_gpu_layers"], seed=seed)


     def tokenize(self, prompt):
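Note: this hunk replaces the hard-coded n_gpu_layers=40 with a config-driven value, writing the default back into self.config when the key is missing so older config files keep working. A minimal standalone sketch of the same pattern, assuming a plain dict config and a placeholder model path (llama_cpp is the llama-cpp-python package this backend wraps):

    # Minimal sketch of the backward-compatible default used in the hunk above.
    # The config contents and model path are placeholders, not the backend's real values.
    import random
    from llama_cpp import Llama

    config = {"model": "model.bin", "ctx_size": 2048}  # an old config without n_gpu_layers

    seed = random.randint(1, 2**31)

    # Persist the default so the key exists the next time the config is saved.
    if "n_gpu_layers" not in config:
        config["n_gpu_layers"] = 40

    model = Llama(
        model_path=f"./models/llama_cpp_official/{config['model']}",
        n_ctx=config["ctx_size"],
        n_gpu_layers=config["n_gpu_layers"],  # layers offloaded to the GPU; 0 keeps everything on CPU
        seed=seed,
    )

The one-liner config.setdefault("n_gpu_layers", 40) would express the same guard; the explicit if mirrors the style of the surrounding file.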
@@ -1,6 +1,7 @@
 version: 4
 config: default
 ctx_size: 2048
+n_gpu_layers: 40
 db_path: databases/database.db
 debug: false
 n_threads: 8
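For context, the added n_gpu_layers key is what the backend in the first hunk now reads. A hedged sketch of loading such a YAML file with PyYAML (the path is a hypothetical example, not necessarily where this repo stores its configs):

    # Sketch: read the updated config and the new n_gpu_layers key.
    # Requires PyYAML (pip install pyyaml); the path is a placeholder.
    import yaml

    with open("configs/default.yaml", "r", encoding="utf-8") as f:
        config = yaml.safe_load(f)

    print(config.get("n_gpu_layers", 40))  # fall back to 40, matching the backend's default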
installations/install_backend_gpu.bat (new file, 7 lines)
@@ -0,0 +1,7 @@
+echo This will recompile llama-cpp-python to use your hardware with GPU enabled.
+rem First purge any old installation and cached wheels
+pip uninstall llama-cpp-python -y
+pip cache purge
+set CMAKE_ARGS=-DLLAMA_CUBLAS=on
+set FORCE_CMAKE=1
+pip install llama-cpp-python --upgrade
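After the script runs, one way to confirm that the rebuilt wheel actually offloads work to the GPU is to load a model with n_gpu_layers greater than 0 and keep llama.cpp's startup log visible; builds compiled with cuBLAS report how many layers were offloaded. A hedged sketch with a placeholder model path:

    # Sketch: verify GPU offload after reinstalling llama-cpp-python.
    from llama_cpp import Llama

    llm = Llama(
        model_path="./models/llama_cpp_official/model.bin",  # placeholder path
        n_gpu_layers=40,  # request 40 layers on the GPU; a CPU-only build ignores this
        verbose=True,     # keep llama.cpp's log so the offload messages are visible
    )

On Linux or macOS the equivalent of the two set lines is exporting CMAKE_ARGS="-DLLAMA_CUBLAS=on" and FORCE_CMAKE=1 before running pip install llama-cpp-python --upgrade.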