Mirror of https://github.com/ParisNeo/lollms-webui.git (synced 2025-04-14 22:26:37 +00:00)

Commit c5d3f92ba8 ("upgraded install"), parent 1b268a8897
@@ -13,6 +13,7 @@ class Install:
            print("-------------- cTransformers backend -------------------------------")
            print("This is the first time you are using this backend.")
            print("Installing ...")
            """
            try:
                print("Checking pytorch")
                import torch
@@ -24,6 +25,8 @@ class Install:
                    self.reinstall_pytorch_with_cuda()
            except Exception as ex:
                self.reinstall_pytorch_with_cuda()
            """

            # Step 2: Install dependencies using pip from requirements.txt
            requirements_file = current_dir / "requirements.txt"
            subprocess.run(["pip", "install", "--upgrade", "--no-cache-dir", "-r", str(requirements_file)])
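As a side note on that second step: below is a minimal sketch (not part of this commit) of the same requirements install, assuming the goal is to install into the interpreter that runs the webui. The helper name install_requirements and the check=True choice are illustrative, not the project's API.

import subprocess
import sys
from pathlib import Path

def install_requirements(current_dir: Path, upgrade: bool = True) -> None:
    # Illustrative helper: run pip through the current interpreter so packages
    # land in the environment that actually executes the backend.
    requirements_file = current_dir / "requirements.txt"
    cmd = [sys.executable, "-m", "pip", "install", "--no-cache-dir", "-r", str(requirements_file)]
    if upgrade:
        cmd.insert(4, "--upgrade")
    # check=True raises if pip fails instead of silently continuing.
    subprocess.run(cmd, check=True)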
@@ -13,6 +13,7 @@ class Install:
            print("-------------- GPT4All backend by nomic-ai -------------------------------")
            print("This is the first time you are using this backend.")
            print("Installing ...")
            """
            try:
                print("Checking pytorch")
                import torch
@@ -23,7 +24,9 @@ class Install:
                    print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
                    self.reinstall_pytorch_with_cuda()
            except Exception as ex:
                self.reinstall_pytorch_with_cuda()
                self.reinstall_pytorch_with_cuda()
            """

            # Step 2: Install dependencies using pip from requirements.txt
            requirements_file = current_dir / "requirements.txt"
            subprocess.run(["pip", "install", "--no-cache-dir", "-r", str(requirements_file)])
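For context, the triple-quoted block in both install hunks is a PyTorch/CUDA check that this commit leaves disabled. A rough sketch of that pattern follows, with a hypothetical body for reinstall_pytorch_with_cuda (its real implementation is not shown in this diff).

import subprocess
import sys

def reinstall_pytorch_with_cuda() -> None:
    # Hypothetical body: pull CUDA-enabled wheels from the PyTorch index.
    subprocess.run([sys.executable, "-m", "pip", "install", "--upgrade", "torch",
                    "--index-url", "https://download.pytorch.org/whl/cu117"], check=True)

def check_pytorch() -> None:
    # Mirrors the disabled block: reinstall with CUDA support if torch is
    # missing or CUDA is unavailable.
    try:
        print("Checking pytorch")
        import torch
        if torch.cuda.is_available():
            print("CUDA is supported.")
        else:
            print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
            reinstall_pytorch_with_cuda()
    except Exception:
        reinstall_pytorch_with_cuda()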
@@ -717,10 +717,7 @@ class GPT4AllAPI():
         self.process.generate(self.discussion_messages, self.current_message, message_id, n_predict = self.config['n_predict'])
         self.process.started_queue.get()
         while(self.process.is_generating.value): # Simulating other commands being issued
-            chunk = ""
-            while not self.process.generation_queue.empty():
-                chk, tok, message_type = self.process.generation_queue.get()
-                chunk += chk
+            chunk, tok, message_type = self.process.generation_queue.get()
             if chunk!="":
                 self.process_chunk(chunk)
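The last hunk replaces the drain-the-queue loop with a single blocking get(). A minimal sketch of why that matters (names are illustrative, not the project's code): multiprocessing.Queue.empty() is not reliable across processes, so polling it can miss chunks, whereas a blocking get() simply waits for the next (chunk, token, message_type) tuple.

from multiprocessing import Process, Queue

def producer(q: Queue) -> None:
    # Stand-in for the generation process pushing (chunk, token_id, message_type) tuples.
    for token in ["Hel", "lo", " wor", "ld"]:
        q.put((token, 0, "chunk"))
    q.put(("", 0, "end"))

if __name__ == "__main__":
    q = Queue()
    Process(target=producer, args=(q,)).start()
    while True:
        chunk, tok, message_type = q.get()  # blocks until the next item arrives
        if message_type == "end":
            break
        print(chunk, end="", flush=True)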