lollms-webui/backends/open_ai/install.py

import subprocess
from pathlib import Path
import requests
from tqdm import tqdm
from api.config import save_config


class Install:
    def __init__(self, api):
        # Get the current directory
        current_dir = Path(__file__).resolve().parent
        install_file = current_dir / ".installed"
        if not install_file.exists():
            print("-------------- OpenAI backend -------------------------------")
            print("This is the first time you are using this backend.")
            print("Installing ...")
            # Install the backend dependencies with pip from requirements.txt
            requirements_file = current_dir / "requirements.txt"
            subprocess.run(["pip", "install", "--upgrade", "--no-cache-dir", "-r", str(requirements_file)])

            # Create the models folder for this backend
            models_folder = Path(f"./models/{Path(__file__).parent.stem}")
            models_folder.mkdir(exist_ok=True, parents=True)

            # Create the local configuration file and store the OpenAI key
            self._local_config_file_path = Path(__file__).parent / "config_local.yaml"
            if not self._local_config_file_path.exists():
                key = input("Please enter your OpenAI key: ")
                config = {
                    "openai_key": key
                }
                self.config = save_config(config, self._local_config_file_path)

            # Create the install file (a marker used to ensure the installation was done correctly)
            with open(install_file, "w") as f:
                f.write("ok")
            print("Installed successfully")
    def reinstall_pytorch_with_cuda(self):
        """Reinstalls pytorch with CUDA support (if you have a GPU)."""
        subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])