diff --git a/backends/gpt_j_a/install.py b/backends/gpt_j_a/install.py
new file mode 100644
index 00000000..d96fdafd
--- /dev/null
+++ b/backends/gpt_j_a/install.py
@@ -0,0 +1,36 @@
+import subprocess
+from pathlib import Path
+import requests
+from tqdm import tqdm
+
+class Install:
+    def __init__(self, api):
+        # Get the current directory
+        current_dir = Path(__file__).resolve().parent
+        install_file = current_dir / ".installed"
+
+        if not install_file.exists():
+            print("-------------- GPTj backend by abdeladim -------------------------------")
+            print("This is the first time you are using this backend.")
+            print("Installing ...")
+            try:
+                print("Checking pytorch")
+                import torch
+                import torchvision
+                if torch.cuda.is_available():
+                    print("CUDA is supported.")
+                else:
+                    print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
+                    self.reinstall_pytorch_with_cuda()
+            except Exception as ex:
+                self.reinstall_pytorch_with_cuda()
+            # Step 2: Install dependencies using pip from requirements.txt
+            requirements_file = current_dir / "requirements.txt"
+            subprocess.run(["pip", "install", "--no-cache-dir", "-r", str(requirements_file)])
+            with open(install_file,"w") as f:
+                f.write("ok")
+            print("Installed successfully")
+
+    def reinstall_pytorch_with_cuda(self):
+        subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])
+    
\ No newline at end of file
diff --git a/backends/gpt_j_m/install.py b/backends/gpt_j_m/install.py
new file mode 100644
index 00000000..f0349775
--- /dev/null
+++ b/backends/gpt_j_m/install.py
@@ -0,0 +1,36 @@
+import subprocess
+from pathlib import Path
+import requests
+from tqdm import tqdm
+
+class Install:
+    def __init__(self, api):
+        # Get the current directory
+        current_dir = Path(__file__).resolve().parent
+        install_file = current_dir / ".installed"
+
+        if not install_file.exists():
+            print("-------------- GPTj backend by marella -------------------------------")
+            print("This is the first time you are using this backend.")
+            print("Installing ...")
+            try:
+                print("Checking pytorch")
+                import torch
+                import torchvision
+                if torch.cuda.is_available():
+                    print("CUDA is supported.")
+                else:
+                    print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
+                    self.reinstall_pytorch_with_cuda()
+            except Exception as ex:
+                self.reinstall_pytorch_with_cuda()
+            # Step 2: Install dependencies using pip from requirements.txt
+            requirements_file = current_dir / "requirements.txt"
+            subprocess.run(["pip", "install", "--no-cache-dir", "-r", str(requirements_file)])
+            with open(install_file,"w") as f:
+                f.write("ok")
+            print("Installed successfully")
+
+    def reinstall_pytorch_with_cuda(self):
+        subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])
+    
\ No newline at end of file
diff --git a/backends/py_llama_cpp/install.py b/backends/py_llama_cpp/install.py
new file mode 100644
index 00000000..8f70c81e
--- /dev/null
+++ b/backends/py_llama_cpp/install.py
@@ -0,0 +1,36 @@
+import subprocess
+from pathlib import Path
+import requests
+from tqdm import tqdm
+
+class Install:
+    def __init__(self, api):
+        # Get the current directory
+        current_dir = Path(__file__).resolve().parent
+        install_file = current_dir / ".installed"
+
+        if not install_file.exists():
+            print("-------------- pyllamacpp backend by abdeladim -------------------------------")
+            print("This is the first time you are using this backend.")
+            print("Installing ...")
+            try:
+                print("Checking pytorch")
+                import torch
+                import torchvision
+                if torch.cuda.is_available():
+                    print("CUDA is supported.")
+                else:
+                    print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
+                    self.reinstall_pytorch_with_cuda()
+            except Exception as ex:
+                self.reinstall_pytorch_with_cuda()
+            # Step 2: Install dependencies using pip from requirements.txt
+            requirements_file = current_dir / "requirements.txt"
+            subprocess.run(["pip", "install", "--no-cache-dir", "-r", str(requirements_file)])
+            with open(install_file,"w") as f:
+                f.write("ok")
+            print("Installed successfully")
+
+    def reinstall_pytorch_with_cuda(self):
+        subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])
+    
\ No newline at end of file
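Note for reviewers: the three `Install` classes above are identical except for the banner they print, and each imports `requests` and `tqdm` without using them. Below is a minimal sketch, not part of this PR, of how the shared logic could be factored into one place; the module path `backends/shared/install_utils.py`, the `BackendInstaller` name, and its `banner` parameter are hypothetical. The sketch also invokes pip via `sys.executable -m pip` with `check=True`, so packages are installed into the interpreter that is actually running the app and pip failures are not silently ignored.

```python
# Hypothetical backends/shared/install_utils.py -- a sketch, not part of this PR.
import subprocess
import sys
from pathlib import Path


class BackendInstaller:
    """Shared first-run installer reused by each backend's Install class."""

    def __init__(self, backend_dir: Path, banner: str):
        self.backend_dir = backend_dir
        self.banner = banner
        self.install_file = backend_dir / ".installed"

    def run(self):
        # The marker file means a previous install already succeeded.
        if self.install_file.exists():
            return
        print(f"-------------- {self.banner} -------------------------------")
        print("This is the first time you are using this backend.")
        print("Installing ...")
        try:
            print("Checking pytorch")
            import torch
            import torchvision  # noqa: F401  (only checked for presence)
            if torch.cuda.is_available():
                print("CUDA is supported.")
            else:
                print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
                self.reinstall_pytorch_with_cuda()
        except Exception:
            self.reinstall_pytorch_with_cuda()
        # Install the backend's own requirements and fail loudly on error.
        self._pip("install", "--no-cache-dir", "-r",
                  str(self.backend_dir / "requirements.txt"))
        self.install_file.write_text("ok")
        print("Installed successfully")

    def reinstall_pytorch_with_cuda(self):
        self._pip("install", "torch", "torchvision", "torchaudio",
                  "--no-cache-dir",
                  "--index-url", "https://download.pytorch.org/whl/cu117")

    @staticmethod
    def _pip(*args):
        # Run pip through the current interpreter rather than a bare "pip".
        subprocess.run([sys.executable, "-m", "pip", *args], check=True)
```

Each backend's `install.py` would then shrink to a thin wrapper, for example:

```python
from pathlib import Path

from backends.shared.install_utils import BackendInstaller  # hypothetical module


class Install:
    def __init__(self, api):
        # The banner text is the only per-backend difference.
        BackendInstaller(Path(__file__).resolve().parent,
                         "GPTj backend by abdeladim").run()
```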