upgraded installs

This commit is contained in:
saloui 2023-05-25 16:40:28 +02:00
parent 5c114ad82c
commit 9a31873538
14 changed files with 245 additions and 10 deletions

2
app.py
View File

@ -791,7 +791,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
try:
filename = model.get('filename',"")
server = model.get('server',"")
image_url = model.get("image_url", '/icons/default.png')
image_url = model.get("icon", '/images/default_model.png')
license = model.get("license", 'unknown')
owner = model.get("owner", 'unknown')
owner_link = model.get("owner_link", 'https://github.com/ParisNeo')

View File

@ -16,7 +16,6 @@ from pathlib import Path
from typing import Callable
from api.backend import LLMBackend
import yaml
from ctransformers import AutoModelForCausalLM
from api.config import load_config
import re
@ -43,13 +42,9 @@ class CustomBackend(LLMBackend):
# The local config can be used to store personal information that shouldn't be shared like chatgpt Key
# or other personal information
# This file is never committed to the repository as it is ignored by .gitignore
# You can remove this if you don't need custom local configurations
self._local_config_file_path = Path(__file__).parent/"config_local.yaml"
if self._local_config_file_path.exists:
self.config = load_config(self._local_config_file_path)
else:
self.config = {
#Put your default configurations here
}
# Do your initialization stuff
@ -113,6 +108,16 @@ Find it in backends
print(ex)
return output
# Decomment if you want to build a custom model listing
#@staticmethod
#def list_models(config:dict):
# """Lists the models for this backend
# """
# models_dir = Path('./models')/config["backend"] # replace with the actual path to the models folder
# return [f.name for f in models_dir.glob(LLMBackend.file_extension)]
#
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory

View File

@ -2,6 +2,7 @@ import subprocess
from pathlib import Path
import requests
from tqdm import tqdm
from api.config import save_config
class Install:
def __init__(self, api):
@ -36,11 +37,25 @@ class Install:
models_folder = Path(f"./models/{Path(__file__).parent.stem}")
models_folder.mkdir(exist_ok=True, parents=True)
# The local config can be used to store personal information that shouldn't be shared like chatgpt Key
# or other personal information
# This file is never committed to the repository as it is ignored by .gitignore
# You can remove this if you don't need custom local configurations
self._local_config_file_path = Path(__file__).parent/"config_local.yaml"
if not self._local_config_file_path.exists():
config = {
#Put your default configurations here
}
save_config(config, self._local_config_file_path)
#Create the install file (a file that is used to ensure the installation was done correctly)
with open(install_file,"w") as f:
f.write("ok")
print("Installed successfully")
def reinstall_pytorch_with_cuda(self):
"""Installs pytorch with cuda (if you have a gpu)
"""
subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])

View File

@ -1,5 +1,6 @@
- LLAMA: 'true'
description: GGML format model files for the original LLaMa
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: llama-7b.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke/
@ -8,6 +9,7 @@
sha256: ec2f2d1f0dfb73b72a4cbac7fa121abbe04c37ab327125a38248f930c0f09ddf
- LLAMA: 'true'
description: GGML format model files for Wizard LM 7B model
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: wizardLM-7B.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke/
@ -16,6 +18,7 @@
sha256: ea35e30a7c140485b856d0919284ce59e4ca47c1b8af037ea8b7ba05ef291c43
- LLAMA: 'true'
description: GGML format model files for Wizard LM 7B model
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: koala-7b.ggml.unquantized.pr613.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke/

View File

@ -1,4 +1,5 @@
- bestMPT: 'true'
icon: https://cdn-1.webcatalog.io/catalog/mosaicml/mosaicml-icon-filled-256.png?v=1675590559063
description: Current best non-commercially licensable chat model based on MPT and
trained by Mosaic ML.
filename: ggml-mpt-7b-chat.bin
@ -12,6 +13,7 @@
server: https://gpt4all.io/models/
- description: A commercially licensable instruct model based on MPT and trained
by Mosaic ML.
icon: https://cdn-1.webcatalog.io/catalog/mosaicml/mosaicml-icon-filled-256.png?v=1675590559063
filename: ggml-mpt-7b-instruct.bin
filesize: '4854401028'
license: Apache 2.0
@ -20,6 +22,7 @@
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A commercially licensable model base pre-trained by Mosaic ML.
icon: https://cdn-1.webcatalog.io/catalog/mosaicml/mosaicml-icon-filled-256.png?v=1675590559063
filename: ggml-mpt-7b-base.bin
filesize: '4854401028'
license: Non commercial
@ -30,6 +33,7 @@
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 7b and trained by
teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
icon: https://cdn-1.webcatalog.io/catalog/mosaicml/mosaicml-icon-filled-256.png?v=1675590559063
filename: ggml-vicuna-7b-1.1-q4_2.bin
filesize: '4212859520'
license: Non commercial

View File

@ -3,6 +3,7 @@
owner_link: https://gpt4all.io
description: Current best commercially licensable model based on GPT-J and trained
by Nomic AI on the latest curated GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-gpt4all-j-v1.3-groovy.bin
filesize: '3785248281'
isDefault: 'true'
@ -10,6 +11,7 @@
server: https://gpt4all.io/models/
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v2 GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-gpt4all-j-v1.2-jazzy.bin
@ -18,6 +20,7 @@
server: https://gpt4all.io/models/
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v1 GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-gpt4all-j-v1.1-breezy.bin
@ -26,6 +29,7 @@
server: https://gpt4all.io/models/
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v0 GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-gpt4all-j.bin
@ -34,6 +38,7 @@
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 7b and trained by
teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-vicuna-7b-1.1-q4_2.bin
@ -42,6 +47,7 @@
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 13b and trained
by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-vicuna-13b-1.1-q4_2.bin
@ -50,6 +56,7 @@
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 7b and trained by
Microsoft and Peking University.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-wizardLM-7B.q4_2.bin
@ -58,6 +65,7 @@
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 13b and RLHF trained
by Stable AI.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-stable-vicuna-13B.q4_2.bin
@ -66,6 +74,7 @@
server: https://gpt4all.io/models/
- description: A commercially licensable model base pre-trained by Mosaic ML.
owner: Nomic AI
icon: https://gpt4all.io/gpt4all-128.png
owner_link: https://gpt4all.io
filename: ggml-mpt-7b-base.bin
filesize: '4854401028'
@ -74,12 +83,14 @@
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Vicuna 13b, fine-tuned
on ~180,000 instructions, trained by Nous Research.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-nous-gpt4-vicuna-13b.bin
filesize: '8136777088'
md5sum: d5eafd5b0bd0d615cfd5fd763f642dfe
server: https://gpt4all.io/models/
- description: A commercially licensable instruct model based on MPT and trained
by Mosaic ML.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-mpt-7b-instruct.bin

View File

@ -1,6 +1,7 @@
- bestGPTJ: 'true'
description: Current best commercially licensable model based on GPT-J and trained
by Nomic AI on the latest curated GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-gpt4all-j-v1.3-groovy.bin
filesize: '3785248281'
isDefault: 'true'
@ -8,12 +9,14 @@
- bestLlama: 'true'
description: Current best non-commercially licensable model based on Llama 13b and
trained by Nomic AI on the latest curated GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-gpt4all-l13b-snoozy.bin
filesize: '8136770688'
md5sum: 91f886b68fbce697e9a3cd501951e455
- bestMPT: 'true'
description: Current best non-commercially licensable chat model based on MPT and
trained by Mosaic ML.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-mpt-7b-chat.bin
filesize: '4854401050'
isDefault: 'true'
@ -21,51 +24,61 @@
requires: 2.4.1
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v2 GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-gpt4all-j-v1.2-jazzy.bin
filesize: '3785248281'
md5sum: 879344aaa9d62fdccbda0be7a09e7976
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v1 GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-gpt4all-j-v1.1-breezy.bin
filesize: '3785248281'
md5sum: 61d48a82cb188cceb14ebb8082bfec37
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v0 GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-gpt4all-j.bin
filesize: '3785248281'
md5sum: 5b5a3f9b858d33b29b52b89692415595
- description: A non-commercially licensable model based on Llama 7b and trained by
teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-vicuna-7b-1.1-q4_2.bin
filesize: '4212859520'
md5sum: 29119f8fa11712704c6b22ac5ab792ea
- description: A non-commercially licensable model based on Llama 13b and trained
by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-vicuna-13b-1.1-q4_2.bin
filesize: '8136770688'
md5sum: 95999b7b0699e2070af63bf5d34101a8
- description: A non-commercially licensable model based on Llama 7b and trained by
Microsoft and Peking University.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-wizardLM-7B.q4_2.bin
filesize: '4212864640'
md5sum: 99e6d129745a3f1fb1121abed747b05a
- description: A non-commercially licensable model based on Llama 13b and RLHF trained
by Stable AI.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-stable-vicuna-13B.q4_2.bin
filesize: '8136777088'
md5sum: 6cb4ee297537c9133bddab9692879de0
- description: A commercially licensable model base pre-trained by Mosaic ML.
filename: ggml-mpt-7b-base.bin
icon: https://gpt4all.io/gpt4all-128.png
filesize: '4854401028'
md5sum: 120c32a51d020066288df045ef5d52b9
requires: 2.4.1
- description: A non-commercially licensable model based on Vicuna 13b, fine-tuned
on ~180,000 instructions, trained by Nous Research.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-nous-gpt4-vicuna-13b.bin
filesize: '8136777088'
md5sum: d5eafd5b0bd0d615cfd5fd763f642dfe
- description: A commercially licensable instruct model based on MPT and trained
by Mosaic ML.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-mpt-7b-instruct.bin
filesize: '4854401028'
md5sum: 1cfa4958f489f0a0d1ffdf6b37322809

View File

@ -1,5 +1,6 @@
- bestLlama: 'true'
description: The official open assistant 30B model finally here
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: OpenAssistant-SFT-7-Llama-30B.ggml.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
@ -9,6 +10,7 @@
- bestLlama: 'true'
description: 'Manticore-13B'
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: Manticore-13B.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
@ -19,6 +21,7 @@
- bestLlama: 'true'
description: Legacy version of Vicuna 7B v 1.1 Quantized on 4 bits
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: legacy-ggml-vicuna-7B-1.1-q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/CRD716
@ -27,6 +30,7 @@
sha256: 67efec973a81151a55e55f8e747b455354979492978b2f9f22a342c6d841e6b7
- bestLlama: 'true'
description: 'WizardLM - uncensored: An Instruction-following LLM Using Evol-Instruct'
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: WizardLM-7B-uncensored.ggml.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke

1
backends/open_ai/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
config_local.yaml

View File

@ -0,0 +1,126 @@
######
# Project : GPT4ALL-UI
# File : backend.py
# Author : ParisNeo with the help of the community
# Underlying backend : Abdeladim's pygptj backend
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This backend is a wrapper to marella's backend
# Follow him on his github project : https://github.com/marella/ctransformers
######
from pathlib import Path
from typing import Callable
from api.backend import LLMBackend
from api.config import load_config
import yaml
import re
__author__ = "parisneo"
__github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
backend_name = "CustomBackend"
class CustomBackend(LLMBackend):
    """Template backend wrapping a remote (Open AI style) service.

    Shows the minimal surface a GPT4All-ui backend must expose:
    tokenize/detokenize, generate, and the model-listing helpers.
    """

    # Extension of the local model files supported by this backend.
    # Remote backends (gpt4 and others) can leave this empty and
    # reimplement their own list_models method instead.
    file_extension = '*.bin'

    def __init__(self, config: dict) -> None:
        """Builds a LLAMACPP backend

        Args:
            config (dict): The configuration file
        """
        super().__init__(config, False)
        # config_local.yaml stores personal information that shouldn't be
        # shared (e.g. a chatgpt key); it is git-ignored so it is never
        # committed to the repository.
        self._local_config_file_path = Path(__file__).parent / "config_local.yaml"
        self.config = load_config(self._local_config_file_path)
        # Do your initialization stuff

    def tokenize(self, prompt):
        """Tokenizes the given prompt using the model's tokenizer.

        Args:
            prompt (str): The input prompt to be tokenized.

        Returns:
            list: A list of tokens representing the tokenized prompt.
        """
        return self.model.tokenize(prompt.encode())

    def detokenize(self, tokens_list):
        """Detokenizes the given list of tokens using the model's tokenizer.

        Args:
            tokens_list (list): A list of tokens to be detokenized.

        Returns:
            str: The detokenized text as a string.
        """
        return self.model.detokenize(tokens_list)

    def generate(self,
                 prompt: str,
                 n_predict: int = 128,
                 new_text_callback: Callable[[str], None] = bool,
                 verbose: bool = False,
                 **gpt_params):
        """Generates text out of a prompt

        Args:
            prompt (str): The prompt to use for generation
            n_predict (int, optional): Number of tokens to prodict. Defaults to 128.
            new_text_callback (Callable[[str], None], optional): A callback function that is called everytime a new text element is generated. Defaults to None.
            verbose (bool, optional): If true, the code will spit many informations about the generation process. Defaults to False.
        """
        try:
            output = ""
            self.model.reset()
            tokens = self.model.tokenize(prompt)
            emitted = 0
            # Canned text standing in for a real model's output.
            generated_text = """
This is an empty backend that shows how you can build your own backend.
Find it in backends
"""
            for token in re.split(r' |\n', generated_text):
                # Stop at the token budget or on an end-of-sequence token.
                if emitted >= n_predict or self.model.is_eos_token(token):
                    break
                fragment = self.model.detokenize(token)
                # A falsy callback return aborts generation early.
                if new_text_callback is not None and not new_text_callback(fragment):
                    break
                output += fragment
                emitted += 1
        except Exception as err:
            print(err)
        return output

    @staticmethod
    def list_models(config: dict):
        """Lists the models for this backend
        """
        return ["ChatGpt by Open AI"]

    @staticmethod
    def get_available_models():
        # Load the models catalog shipped next to this backend file.
        models_file = Path(__file__).parent / "models.yaml"
        with open(models_file, 'r') as f:
            return yaml.safe_load(f)

View File

@ -0,0 +1,44 @@
import subprocess
from pathlib import Path
import requests
from tqdm import tqdm
from api.config import save_config
class Install:
    """One-time installer for the Open AI backend.

    On first use (no ``.installed`` marker file next to this module) it
    installs the pip requirements, creates the models folder, and asks the
    user for an Open AI key which is saved into a git-ignored local
    configuration file.
    """

    def __init__(self, api):
        """Run the installation steps if they have not been done before.

        Args:
            api: The hosting application object (part of the installer
                contract; not used directly here).
        """
        # Get the current directory
        current_dir = Path(__file__).resolve().parent
        install_file = current_dir / ".installed"

        if not install_file.exists():
            print("-------------- Template backend -------------------------------")
            print("This is the first time you are using this backend.")
            print("Installing ...")
            # Step 2: Install dependencies using pip from requirements.txt
            requirements_file = current_dir / "requirements.txt"
            subprocess.run(["pip", "install", "--upgrade", "--no-cache-dir", "-r", str(requirements_file)])

            # Create the models folder
            models_folder = Path(f"./models/{Path(__file__).parent.stem}")
            models_folder.mkdir(exist_ok=True, parents=True)

            # Create the git-ignored local config holding the user's key.
            # BUGFIX: the original tested the bound method `Path.exists`
            # (always truthy), so `not ... .exists` was always False and the
            # key prompt never ran; it must be *called*: `.exists()`.
            self._local_config_file_path = Path(__file__).parent / "config_local.yaml"
            if not self._local_config_file_path.exists():
                key = input("Please enter your Open AI Key")
                config = {
                    "openai_key": key
                }
                self.config = save_config(config, self._local_config_file_path)

            # Create the install file (used to ensure the installation was done correctly)
            with open(install_file, "w") as f:
                f.write("ok")
            print("Installed successfully")

    def reinstall_pytorch_with_cuda(self):
        """Installs pytorch with cuda (if you have a gpu)
        """
        subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])

View File

@ -0,0 +1,8 @@
- description: ChatGPT model
  icon: https://upload.wikimedia.org/wikipedia/commons/0/04/ChatGPT_logo.svg
filename: ChatGpt by Open AI
license: Commercial
owner_link: https://link_to_the_owner_web_page
owner: Open AI
server: https://openai.com
sha256: NONE

View File

@ -0,0 +1 @@
openai

Binary file not shown.

After

Width:  |  Height:  |  Size: 245 KiB