######
# Project : GPT4ALL-UI
# File : backend.py
# Author : ParisNeo with the help of the community
# Underlying backend : marella's ctransformers backend
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This backend is a wrapper around marella's ctransformers backend.
# Follow him on his github project : https://github.com/marella/ctransformers
######
from pathlib import Path
from typing import Callable
from gpt4all_api.backend import GPTBackend
import yaml
from ctransformers import AutoModelForCausalLM

__author__ = "parisneo"
__github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"

backend_name = "GPTJ"

class GPTJ(GPTBackend):
    file_extension='*.bin'
    def __init__(self, config:dict) -> None:
        """Builds a ctransformers backend

        Args:
            config (dict): The configuration file
        """
        super().__init__(config, False)
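
        # The ctransformers model_type is inferred from the model file name
        # given in the configuration (a simple substring heuristic).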
        if 'gpt2' in self.config['model']:
            model_type='gpt2'
        elif 'gptj' in self.config['model']:
            model_type='gptj'
        elif 'gpt_neox' in self.config['model']:
            model_type='gpt_neox'
        elif 'dolly-v2' in self.config['model']:
            model_type='dolly-v2'
        elif 'starcoder' in self.config['model']:
            model_type='starcoder'
        elif 'llama' in self.config['model'] or 'wizardLM' in self.config['model']:
            model_type='llama'
        elif 'mpt' in self.config['model']:
            model_type='mpt'
        else:
            print("The model you are using is not supported by this backend")
            return
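
        # When use_avx2 is set, let ctransformers pick its default library build;
        # otherwise force the plain AVX build for older CPUs.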
        if self.config["use_avx2"]:
            self.model = AutoModelForCausalLM.from_pretrained(
                f"./models/c_transformers/{self.config['model']}", model_type=model_type
            )
        else:
            self.model = AutoModelForCausalLM.from_pretrained(
                f"./models/c_transformers/{self.config['model']}", model_type=model_type, lib="avx"
            )

    def tokenize(self, prompt):
        """
        Tokenizes the given prompt using the model's tokenizer.

        Args:
            prompt (str): The input prompt to be tokenized.

        Returns:
            list: A list of tokens representing the tokenized prompt.
        """
        return self.model.tokenize(prompt)

    def detokenize(self, tokens_list):
        """
        Detokenizes the given list of tokens using the model's tokenizer.

        Args:
            tokens_list (list): A list of tokens to be detokenized.

        Returns:
            str: The detokenized text as a string.
        """
        return self.model.detokenize(tokens_list)

    def generate(self,
                 prompt:str,
                 n_predict: int = 128,
                 new_text_callback: Callable[[str], None] = None,
                 verbose: bool = False,
                 **gpt_params):
        """Generates text out of a prompt

        Args:
            prompt (str): The prompt to use for generation
            n_predict (int, optional): Number of tokens to predict. Defaults to 128.
            new_text_callback (Callable[[str], None], optional): A callback function that is called every time a new text element is generated. Defaults to None.
            verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
        """
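        # Stream tokens from the model, stopping after n_predict tokens, at the
        # end-of-sequence token, or when the callback returns False.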
        try:
            output = ""
            self.model.reset()
            tokens = self.model.tokenize(prompt)
            count = 0
            for tok in self.model.generate(
                    tokens,
                    top_k=self.config['top_k'],
                    top_p=self.config['top_p'],
                    temperature=self.config['temperature'],
                    repetition_penalty=self.config['repeat_penalty'],
                    seed=self.config['seed'],
                    batch_size=1,
                    threads=self.config['n_threads'],
                    reset=True,
                    ):
                if count >= n_predict or self.model.is_eos_token(tok):
                    break
                word = self.model.detokenize(tok)
                if new_text_callback is not None:
                    if not new_text_callback(word):
                        break
                output += word
                count += 1
        except Exception as ex:
            print(ex)
        return output

    @staticmethod
    def get_available_models():
        # Create the file path relative to the child class's directory
        backend_path = Path(__file__).parent
        file_path = backend_path/"models.yaml"

        with open(file_path, 'r') as file:
            yaml_data = yaml.safe_load(file)

        return yaml_data
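

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the backend API): shows
# how this backend is typically driven. The config values and the model file
# name below are placeholders/assumptions, not shipped defaults, and a real
# model file must exist under ./models/c_transformers/ for this to run.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    example_config = {
        "model": "example-gptj-model.bin",  # hypothetical file name (contains 'gptj')
        "use_avx2": True,
        "top_k": 40,
        "top_p": 0.95,
        "temperature": 0.7,
        "repeat_penalty": 1.1,
        "seed": -1,
        "n_threads": 8,
    }

    def on_new_text(word: str) -> bool:
        # Print each streamed chunk; returning False would stop generation early.
        print(word, end="", flush=True)
        return True

    backend = GPTJ(example_config)
    print(backend.generate("Once upon a time", n_predict=32, new_text_callback=on_new_text))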