mirror of https://github.com/ParisNeo/lollms-webui.git
synced 2025-06-22 00:41:54 +00:00
upgraded install
cTransformers backend installer (class Install):

@@ -13,6 +13,7 @@ class Install:
         print("-------------- cTransformers backend -------------------------------")
         print("This is the first time you are using this backend.")
         print("Installing ...")
+        """
         try:
             print("Checking pytorch")
             import torch
@@ -24,6 +25,8 @@ class Install:
                 self.reinstall_pytorch_with_cuda()
         except Exception as ex:
             self.reinstall_pytorch_with_cuda()
+        """
+
         # Step 2: Install dependencies using pip from requirements.txt
         requirements_file = current_dir / "requirements.txt"
         subprocess.run(["pip", "install", "--upgrade", "--no-cache-dir", "-r", str(requirements_file)])
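The two hunks above wrap the PyTorch/CUDA probe in a triple-quoted string, disabling it while keeping it in the file. For context, a minimal sketch of the block being disabled, assuming the elided middle lines follow the usual torch.cuda.is_available() check (the body of the if is an assumption inferred from the GPT4All hunk below; reinstall_pytorch_with_cuda is the repo's own helper):

    try:
        print("Checking pytorch")
        import torch
        if not torch.cuda.is_available():
            # assumption: a CPU-only torch build triggers a reinstall with CUDA
            print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
            self.reinstall_pytorch_with_cuda()
    except Exception as ex:
        # torch is missing or broken entirely: install it fresh with CUDA support
        self.reinstall_pytorch_with_cuda()

With the probe fenced off, the installer falls through directly to the pip step.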
GPT4All backend installer (class Install):

@@ -13,6 +13,7 @@ class Install:
         print("-------------- GPT4All backend by nomic-ai -------------------------------")
         print("This is the first time you are using this backend.")
         print("Installing ...")
+        """
         try:
             print("Checking pytorch")
             import torch
@@ -23,7 +24,9 @@ class Install:
                 print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
                 self.reinstall_pytorch_with_cuda()
         except Exception as ex:
             self.reinstall_pytorch_with_cuda()
+        """
+
         # Step 2: Install dependencies using pip from requirements.txt
         requirements_file = current_dir / "requirements.txt"
         subprocess.run(["pip", "install", "--no-cache-dir", "-r", str(requirements_file)])
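A side note on the pip step in both installers: subprocess.run(["pip", ...]) resolves pip from PATH, which can belong to a different interpreter than the one running the installer. A more defensive variant (a suggestion, not what this commit does) routes the call through the current interpreter; current_dir is assumed to point at the backend folder, as in the diffs:

    import subprocess
    import sys
    from pathlib import Path

    current_dir = Path(__file__).parent  # assumption: the installer sits next to requirements.txt
    requirements_file = current_dir / "requirements.txt"
    # `python -m pip` targets the interpreter executing this script,
    # not whatever pip happens to be first on PATH
    subprocess.run(
        [sys.executable, "-m", "pip", "install", "--upgrade", "--no-cache-dir", "-r", str(requirements_file)],
        check=True,  # raise CalledProcessError on failure instead of continuing silently
    )

Note also the asymmetry between the two installers: the cTransformers command passes --upgrade while the GPT4All one does not; if that is unintentional, aligning them would make the backends install the same way.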
Generation loop in class GPT4AllAPI (the queue is now read one message at a time):

@@ -717,10 +717,7 @@ class GPT4AllAPI():
         self.process.generate(self.discussion_messages, self.current_message, message_id, n_predict = self.config['n_predict'])
         self.process.started_queue.get()
         while(self.process.is_generating.value): # Simulating other commands being issued
-            chunk = ""
-            while not self.process.generation_queue.empty():
-                chk, tok, message_type = self.process.generation_queue.get()
-                chunk += chk
+            chunk, tok, message_type = self.process.generation_queue.get()
             if chunk!="":
                 self.process_chunk(chunk)
 
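The old inner loop primed chunk = "" and drained generation_queue with an empty()/get() pair, concatenating fragments; the new code blocks on a single get() per iteration. That removes the busy-wait, but it assumes the producer always posts at least one message while is_generating is set, otherwise the consumer blocks indefinitely. A self-contained sketch of this blocking-consumer pattern, with hypothetical names (producer, q, and the message_type codes) standing in for the repo's generation process:

    import multiprocessing as mp

    def producer(q: mp.Queue):
        # hypothetical generator: streams (chunk, token_id, message_type) tuples
        for i, piece in enumerate(["Hello", ",", " ", "world"]):
            q.put((piece, i, 0))   # 0 = ordinary text chunk (assumed convention)
        q.put(("", -1, 1))         # 1 = end-of-generation marker (assumed convention)

    if __name__ == "__main__":
        q = mp.Queue()
        p = mp.Process(target=producer, args=(q,))
        p.start()
        while True:
            chunk, tok, message_type = q.get()  # blocks until the producer yields something
            if message_type == 1:               # final message: stop consuming
                break
            if chunk != "":
                print(chunk, end="", flush=True)
        p.join()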