######
# Project : GPT4ALL-UI
# File : backends/open_ai/__init__.py
# Author : ParisNeo with the help of the community
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This file defines a template backend (CustomBackend) that shows how to
# build your own backend; it streams a placeholder text instead of calling
# a real model.
######
from pathlib import Path
from typing import Callable
from api.backend import LLMBackend
from api.config import load_config
import yaml
import re

__author__ = "parisneo"
__github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"

backend_name = "CustomBackend"

class CustomBackend(LLMBackend):
    # Define the extension of the model files supported by your backend.
    # This only applies to local models; for remote models (GPT-4 and others)
    # you can leave it empty and reimplement your own list_models method.
    file_extension = '*.bin'

    def __init__(self, config: dict) -> None:
        """Builds a custom backend

        Args:
            config (dict): The configuration file
        """
        super().__init__(config, False)

        # The local config can be used to store personal information that
        # shouldn't be shared, like an OpenAI API key.
        # This file is never committed to the repository as it is ignored by
        # .gitignore.
        self._local_config_file_path = Path(__file__).parent / "config_local.yaml"
        self.config = load_config(self._local_config_file_path)
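        # A minimal, hypothetical config_local.yaml might look like this
        # (the key name below is illustrative, not a documented setting):
        #   openai_key: "sk-..."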
        # Do your initialization stuff here, e.g. set up self.model, which
        # tokenize/detokenize below rely on.

    def tokenize(self, prompt):
        """
        Tokenizes the given prompt using the model's tokenizer.

        Args:
            prompt (str): The input prompt to be tokenized.

        Returns:
            list: A list of tokens representing the tokenized prompt.
        """
        return self.model.tokenize(prompt.encode())

    def detokenize(self, tokens_list):
        """
        Detokenizes the given list of tokens using the model's tokenizer.

        Args:
            tokens_list (list): A list of tokens to be detokenized.

        Returns:
            str: The detokenized text as a string.
        """
        return self.model.detokenize(tokens_list)
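    # Illustrative round trip (hypothetical; assumes self.model was set up in
    # __init__ with tokenize/detokenize support):
    #   tokens = backend.tokenize("Hello world")  # -> list of token ids
    #   text = backend.detokenize(tokens)         # -> "Hello world" (or close)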

    def generate(self,
                 prompt: str,
                 n_predict: int = 128,
                 new_text_callback: Callable[[str], None] = None,
                 verbose: bool = False,
                 **gpt_params):
        """Generates text out of a prompt

        Args:
            prompt (str): The prompt to use for generation
            n_predict (int, optional): Number of tokens to predict. Defaults to 128.
            new_text_callback (Callable[[str], None], optional): A callback function that is called every time a new text element is generated. Defaults to None.
            verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
        """
        try:
            output = ""
            count = 0
            # A real backend would reset its model and tokenize the prompt
            # here, e.g.:
            #   self.model.reset()
            #   tokens = self.model.tokenize(prompt)
            # This template instead streams a fixed placeholder text.
            generated_text = """
This is an empty backend that shows how you can build your own backend.
Find it in backends.
"""
            # Stream the placeholder word by word, mimicking token streaming.
            # A real backend would also stop when its model emits an EOS token.
            for word in re.split(r' |\n', generated_text):
                if count >= n_predict:
                    break
                chunk = word + " "
                if new_text_callback is not None:
                    if not new_text_callback(chunk):
                        break
                output += chunk
                count += 1
        except Exception as ex:
            print(ex)
        return output

    @staticmethod
    def list_models(config: dict):
        """Lists the models for this backend"""
        return ["ChatGpt by Open AI"]

    @staticmethod
    def get_available_models():
        """Gets the list of models described in this backend's models.yaml file"""
        # Create the file path relative to the child class's directory
        backend_path = Path(__file__).parent
        file_path = backend_path / "models.yaml"

        with open(file_path, 'r') as file:
            yaml_data = yaml.safe_load(file)

        return yaml_data
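

# --- Minimal usage sketch (illustrative only, not part of the backend API) ---
# This shows how a host application might drive this template backend. It
# assumes the api.backend and api.config modules are importable and that a
# config_local.yaml file exists next to this file; the empty config dict
# passed in is a placeholder. The callback prints each streamed chunk and
# returns True to keep generation going.
if __name__ == "__main__":
    def on_new_text(chunk: str) -> bool:
        print(chunk, end="", flush=True)
        return True

    backend = CustomBackend(config={})
    backend.generate("Hello", n_predict=32, new_text_callback=on_new_text)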