removed unnecessary stuff

This commit is contained in:
Saifeddine ALOUI 2023-06-30 19:11:09 +02:00
parent 462cd27e4f
commit 3c38f47f47
2 changed files with 0 additions and 172 deletions

api/binding.py

@@ -1,88 +0,0 @@
######
# Project : lollms-webui
# File : binding.py
# Author : ParisNeo with the help of the community
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for lollms-webui bindings.
######
from pathlib import Path
from typing import Callable
import inspect
import yaml
import sys
__author__ = "parisneo"
__github__ = "https://github.com/ParisNeo/lollms-webui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"

class LLMBinding:
    file_extension = '*.bin'
    binding_path = Path(__file__).parent

    def __init__(self, config: dict, inline: bool) -> None:
        self.config = config
        self.inline = inline

    def generate(self,
                 prompt: str,
                 n_predict: int = 128,
                 new_text_callback: Callable[[str], None] = None,
                 verbose: bool = False,
                 **gpt_params):
        """Generates text out of a prompt.
        This should be implemented by the child class.

        Args:
            prompt (str): The prompt to use for generation.
            n_predict (int, optional): Number of tokens to predict. Defaults to 128.
            new_text_callback (Callable[[str], None], optional): A callback function that is called every time a new text element is generated. Defaults to None.
            verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
        """
        pass

    def tokenize(self, prompt):
        """
        Tokenizes the given prompt using the model's tokenizer.

        Args:
            prompt (str): The input prompt to be tokenized.

        Returns:
            list: A list of tokens representing the tokenized prompt.
        """
        pass

    def detokenize(self, tokens_list):
        """
        Detokenizes the given list of tokens using the model's tokenizer.

        Args:
            tokens_list (list): A list of tokens to be detokenized.

        Returns:
            str: The detokenized text as a string.
        """
        pass

    @staticmethod
    def list_models(config: dict):
        """Lists the models for this binding."""
        models_dir = Path('./models') / config["binding_name"]  # replace with the actual path to the models folder
        return [f.name for f in models_dir.glob(LLMBinding.file_extension)]

    @staticmethod
    def get_available_models():
        """Returns the list of models described in this binding's models.yaml file."""
        # Create the file path relative to the child class's directory
        binding_path = Path(__file__).parent
        file_path = binding_path / "models.yaml"

        with open(file_path, 'r') as file:
            yaml_data = yaml.safe_load(file)

        return yaml_data
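
For context, a minimal sketch of how a concrete binding might have implemented this interface before its removal. The class name EchoBinding and its echo / whitespace-split behavior are illustrative assumptions, not part of the removed code.

# Hypothetical child binding implementing the LLMBinding interface above.
from api.binding import LLMBinding


class EchoBinding(LLMBinding):
    file_extension = '*.bin'

    def generate(self, prompt: str, n_predict: int = 128,
                 new_text_callback=None, verbose: bool = False, **gpt_params):
        # A real binding would run an actual model; this stub just echoes the prompt.
        output = prompt
        if new_text_callback is not None:
            new_text_callback(output)
        return output

    def tokenize(self, prompt):
        # Whitespace split stands in for a real tokenizer.
        return prompt.split()

    def detokenize(self, tokens_list):
        return " ".join(tokens_list)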


@@ -1,84 +0,0 @@
try:
    from langchain.llms.base import LLM
except ImportError:
    raise ImportError(
        'To use the ctransformers.langchain module, please install the '
        '`langchain` python package: `pip install langchain`')

from typing import Any, Dict, Optional, Sequence
from pydantic import root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from api.binding import LLMBinding

class GenericBinding(LLM):
    """Wrapper around all compatible LLM interfaces.
    Thanks to Marella for providing the base for this work.
    To follow him, here is his github profile:

    To use, you should have the `langchain` python package installed.
    """

    client: Any  #: :meta private:

    model: str
    """The path to a model file or directory or the name of a Hugging Face Hub
    model repo."""

    model_type: Optional[str] = None
    """The model type."""

    model_file: Optional[str] = None
    """The name of the model file in repo or directory."""

    config: Optional[Dict[str, Any]] = None
    """The config parameters."""

    lib: Optional[Any] = None
    """The path to a shared library or one of `avx2`, `avx`, `basic`."""

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {
            'model': self.model,
            'model_type': self.model_type,
            'model_file': self.model_file,
            'config': self.config,
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return 'generic_binding'

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate and load model from a local file or remote repo."""
        config = values['config'] or {}
        values['client'] = LLMBinding(config, True)
        return values

    def _call(
        self,
        prompt: str,
        stop: Optional[Sequence[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        """Generate text from a prompt.

        Args:
            prompt: The prompt to generate text from.
            stop: A list of sequences to stop generation when encountered.

        Returns:
            The generated text.
        """
        text = []
        for chunk in self.client(prompt, stop=stop, stream=True):
            text.append(chunk)
            if run_manager:
                run_manager.on_llm_new_token(chunk, verbose=self.verbose)
        return ''.join(text)
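
For reference, a minimal sketch of how this wrapper was meant to be used from langchain before its removal. The model path and config keys shown are assumptions, and note that the removed validate_environment instantiates the abstract LLMBinding directly, so a concrete binding would be needed for generation to actually produce text.

# Hypothetical usage sketch of the removed GenericBinding wrapper
# (assumed model path and config keys; requires `pip install langchain`).
from langchain import LLMChain, PromptTemplate

llm = GenericBinding(model="models/llama_cpp/example.bin",   # assumed path
                     config={"binding_name": "llama_cpp"})    # assumed config keys

template = PromptTemplate(input_variables=["question"],
                          template="Answer briefly: {question}")
chain = LLMChain(llm=llm, prompt=template)
print(chain.run(question="What does a binding do?"))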