lollms-webui/backends/llama_cpp/__init__.py

######
# Project : GPT4ALL-UI
# File : __init__.py
# Author : ParisNeo with the help of the community
# Supported by Nomic-AI
# Licence : Apache 2.0
# Description :
# This is the LLAMACPP backend for GPT4All-ui, built on pyllamacpp.
######
from pathlib import Path
from typing import Callable
from pyllamacpp.model import Model
from pyGpt4All.backend import GPTBackend
import yaml

__author__ = "parisneo"
__github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"

backend_name = "LLAMACPP"

class LLAMACPP(GPTBackend):
    file_extension = '*.bin'

    def __init__(self, config: dict) -> None:
        """Builds a LLAMACPP backend

        Args:
            config (dict): The configuration dictionary
        """
        super().__init__(config, False)
        # Load the model from the local models folder using pyllamacpp
        self.model = Model(
            model_path=f"./models/llama_cpp/{self.config['model']}",
            prompt_context="", prompt_prefix="", prompt_suffix="",
            n_ctx=self.config['ctx_size'],
            seed=self.config['seed'],
        )

    def stop_generation(self):
        # Uses pyllamacpp's private _grab_text_callback hook to interrupt generation
        self.model._grab_text_callback()

    def generate(self,
                 prompt: str,
                 n_predict: int = 128,
                 new_text_callback: Callable[[str], bool] = bool,
                 verbose: bool = False,
                 **gpt_params):
        """Generates text out of a prompt

        Args:
            prompt (str): The prompt to use for generation
            n_predict (int, optional): Number of tokens to predict. Defaults to 128.
            new_text_callback (Callable[[str], bool], optional): A callback called every
                time a new text chunk is generated; return False to stop generation.
                Defaults to bool, which never stops on non-empty tokens.
            verbose (bool, optional): If true, the code will print detailed information
                about the generation process. Defaults to False.
        """
        try:
            self.model.reset()
            for tok in self.model.generate(prompt,
                                           n_predict=n_predict,
                                           temp=self.config['temperature'],
                                           top_k=self.config['top_k'],
                                           top_p=self.config['top_p'],
                                           repeat_penalty=self.config['repeat_penalty'],
                                           repeat_last_n=self.config['repeat_last_n'],
                                           n_threads=self.config['n_threads'],
                                           ):
                # Stop streaming if the callback signals it by returning False
                if not new_text_callback(tok):
                    return
        except Exception as ex:
            print(ex)
    @staticmethod
    def get_available_models():
        # Create the file path relative to the child class's directory
        backend_path = Path(__file__).parent
        file_path = backend_path / "models.yaml"
        with open(file_path, 'r') as file:
            yaml_data = yaml.safe_load(file)
        return yaml_data
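

# --- Usage sketch (illustrative, not part of the backend) ---
# A minimal example of how this backend might be driven, assuming a config
# dict with the keys read above and a model file present under
# ./models/llama_cpp/. The config values and the print_token callback are
# hypothetical and only for illustration.
if __name__ == "__main__":
    example_config = {
        'model': 'ggml-model.bin',  # hypothetical model file name
        'ctx_size': 512,
        'seed': 0,
        'temperature': 0.7,
        'top_k': 40,
        'top_p': 0.95,
        'repeat_penalty': 1.1,
        'repeat_last_n': 64,
        'n_threads': 4,
    }

    def print_token(tok: str) -> bool:
        # Stream tokens to stdout; returning True keeps generation going
        print(tok, end="", flush=True)
        return True

    backend = LLAMACPP(example_config)
    backend.generate("Once upon a time", n_predict=32, new_text_callback=print_token)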