local update

Saifeddine ALOUI 2023-06-05 01:21:12 +02:00
parent e98b6ec584
commit fb2f7941ab
69 changed files with 170 additions and 2679 deletions


@ -11,7 +11,8 @@ from datetime import datetime
from api.db import DiscussionsDB
from pathlib import Path
import importlib
from pyaipersonality import AIPersonality
from lollms import AIPersonality, lollms_path
from lollms.binding import BindingConfig
import multiprocessing as mp
import threading
import time
@ -80,7 +81,7 @@ def parse_requirements_file(requirements_path):
class ModelProcess:
def __init__(self, config=None):
def __init__(self, config:BindingConfig=None):
self.config = config
self.generate_queue = mp.Queue()
self.generation_queue = mp.Queue()
@ -89,6 +90,8 @@ class ModelProcess:
self.set_config_queue = mp.Queue(maxsize=1)
self.set_config_result_queue = mp.Queue(maxsize=1)
self.models_path = Path('models')
self.process = None
# Create synchronization objects
self.start_signal = mp.Event()
@ -134,7 +137,7 @@ class ModelProcess:
print(f"Loading binding {binding_name} install ON")
else:
print(f"Loading binding : {binding_name} install is off")
binding_path = Path("bindings")/binding_name
binding_path = lollms_path/"bindings_zoo"/binding_name
if install:
# first find out if there is a requirements.txt file
install_file_name="install.py"
@ -203,7 +206,7 @@ class ModelProcess:
def rebuild_binding(self, config):
try:
print(" ******************* Building Binding from main Process *************************")
binding = self.load_binding(config["binding"], install=True)
binding = self.load_binding(config["binding_name"], install=True)
print("Binding loaded successfully")
except Exception as ex:
print("Couldn't build binding.")
@ -215,19 +218,19 @@ class ModelProcess:
try:
self.reset_config_result()
print(" ******************* Building Binding from generation Process *************************")
self.binding = self.load_binding(self.config["binding"], install=True)
self.binding = self.load_binding(self.config["binding_name"], install=True)
print("Binding loaded successfully")
try:
model_file = Path("models")/self.config["binding"]/self.config["model"]
model_file = self.models_path/self.config["binding_name"]/self.config["model_name"]
print(f"Loading model : {model_file}")
self.model = self.binding(self.config)
self.model_ready.value = 1
print("Model created successfully\n")
except Exception as ex:
if self.config["model"] is None:
if self.config["model_name"] is None:
print("No model is selected.\nPlease select a backend and a model to start using the ui.")
else:
print(f"Couldn't build model {self.config['model']} : {ex}")
print(f"Couldn't build model {self.config['model_name']} : {ex}")
self.model = None
self._set_config_result['status'] ='failed'
self._set_config_result['binding_status'] ='failed'
@ -244,8 +247,9 @@ class ModelProcess:
def rebuild_personality(self):
try:
print(f" ******************* Building Personality {self.config['personality']} from main Process *************************")
personality_path = f"personalities/{self.config['personality_language']}/{self.config['personality_category']}/{self.config['personality']}"
personality = self.config['personalities'][self.config['default_personality_id']]
print(f" ******************* Building Personality {personality} from main Process *************************")
personality_path = lollms_path/f"personalities_zoo/{personality}"
personality = AIPersonality(personality_path, run_scripts=False)
print(f" ************ Personality {personality.name} is ready (Main process) ***************************")
except Exception as ex:
@ -259,8 +263,9 @@ class ModelProcess:
def _rebuild_personality(self):
try:
self.reset_config_result()
print(f" ******************* Building Personality {self.config['personality']} from generation Process *************************")
personality_path = f"personalities/{self.config['personality_language']}/{self.config['personality_category']}/{self.config['personality']}"
personality = self.config['personalities'][self.config['default_personality_id']]
print(f" ******************* Building Personality {personality} from generation Process *************************")
personality_path = lollms_path/f"personalities_zoo/{personality}"
self.personality = AIPersonality(personality_path)
print(f" ************ Personality {self.personality.name} is ready (generation process) ***************************")
except Exception as ex:
@ -429,16 +434,16 @@ class ModelProcess:
self.config = config
print("Changing configuration")
# verify that the binding is the same
if self.config["binding"]!=bk_cfg["binding"] or self.config["model"]!=bk_cfg["model"]:
if self.config["binding_name"]!=bk_cfg["binding_name"] or self.config["model_name"]!=bk_cfg["model_name"]:
self._rebuild_model()
# verify that the personality is the same
if self.config["personality"]!=bk_cfg["personality"] or self.config["personality_category"]!=bk_cfg["personality_category"] or self.config["personality_language"]!=bk_cfg["personality_language"]:
if self.config["personalities"][-1]!=bk_cfg["personalities"][-1]:
self._rebuild_personality()
class GPT4AllAPI():
def __init__(self, config:dict, socketio, config_file_path:str) -> None:
class LoLLMsAPPI():
def __init__(self, config:BindingConfig, socketio, config_file_path:str) -> None:
self.socketio = socketio
#Create and launch the process
self.process = ModelProcess(config)
@ -485,7 +490,7 @@ class GPT4AllAPI():
print("Install model triggered")
model_path = data["path"]
progress = 0
installation_dir = Path(f'./models/{self.config["binding"]}/')
installation_dir = Path(f'./models/{self.config["binding_name"]}/')
filename = Path(model_path).name
installation_path = installation_dir / filename
print("Model install requested")
@ -512,7 +517,7 @@ class GPT4AllAPI():
@socketio.on('uninstall_model')
def uninstall_model(data):
model_path = data['path']
installation_dir = Path(f'./models/{self.config["binding"]}/')
installation_dir = Path(f'./models/{self.config["binding_name"]}/')
filename = Path(model_path).name
installation_path = installation_dir / filename
@ -682,10 +687,8 @@ class GPT4AllAPI():
link_text = self.personality.link_text
if len(self.full_message_list) > self.config["nb_messages_to_remember"]:
discussion_messages = self.personality.personality_conditioning+ link_text.join(self.full_message_list[-self.config["nb_messages_to_remember"]:])
else:
discussion_messages = self.personality.personality_conditioning+ link_text.join(self.full_message_list)
discussion_messages = self.personality.personality_conditioning+ link_text.join(self.full_message_list)
return discussion_messages, message["content"]
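The changes above migrate this file from pyaipersonality to the lollms package: flat config keys are renamed (binding becomes binding_name, model becomes model_name, and the single personality entry becomes a personalities list indexed by default_personality_id), while bindings and personalities are now resolved inside the package's zoo folders through lollms_path. A minimal sketch of the new resolution convention, with placeholder config values that are not from the commit:

```python
from pathlib import Path
from lollms import lollms_path  # Path to the installed lollms package

# Hypothetical config using the renamed keys introduced by this commit
config = {
    "binding_name": "some_binding",                  # was: "binding"
    "model_name": "some-model.bin",                  # was: "model"
    "personalities": ["english/generic/assistant"],  # language/category/name
    "default_personality_id": 0,
}

# Bindings and personalities come from the lollms package's zoo folders
binding_path = lollms_path / "bindings_zoo" / config["binding_name"]
personality = config["personalities"][config["default_personality_id"]]
personality_path = lollms_path / "personalities_zoo" / personality

# Model files stay in the local ./models tree, grouped by binding
model_file = Path("models") / config["binding_name"] / config["model_name"]
```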


@ -72,7 +72,7 @@ class LLMBinding:
def list_models(config:dict):
"""Lists the models for this binding
"""
models_dir = Path('./models')/config["binding"] # replace with the actual path to the models folder
models_dir = Path('./models')/config["binding_name"] # replace with the actual path to the models folder
return [f.name for f in models_dir.glob(LLMBinding.file_extension)]
@staticmethod

app.py

@ -24,7 +24,7 @@ import sys
from tqdm import tqdm
import subprocess
import signal
from pyaipersonality import AIPersonality
from lollms import AIPersonality, lollms_path
from api.db import DiscussionsDB, Discussion
from flask import (
Flask,
@ -45,6 +45,7 @@ import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
import logging
import psutil
from lollms.binding import BindingConfig
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
@ -61,23 +62,24 @@ logging.basicConfig(level=logging.WARNING)
import time
from api.config import load_config, save_config
from api import GPT4AllAPI
from api import LoLLMsAPPI
import shutil
import markdown
class Gpt4AllWebUI(GPT4AllAPI):
def __init__(self, _app, _socketio, config:dict, config_file_path) -> None:
class LoLLMsWebUI(LoLLMsAPPI):
def __init__(self, _app, _socketio, config:BindingConfig, config_file_path) -> None:
super().__init__(config, _socketio, config_file_path)
self.app = _app
self.cancel_gen = False
if "use_new_ui" in self.config:
if self.config["use_new_ui"]:
app.template_folder = "web/dist"
app.template_folder = "web/dist"
self.personality_language= config["personalities"][config["default_personality_id"]].split("/")[0]
self.personality_category= config["personalities"][config["default_personality_id"]].split("/")[1]
self.personality_name= config["personalities"][config["default_personality_id"]].split("/")[2]
# =========================================================================================
# Endpoints
@ -259,7 +261,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
return 'App is resetting...'
def save_settings(self):
save_config(self.config, self.config_file_path)
self.config.save_config(self.config_file_path)
if self.config["debug"]:
print("Configuration saved")
# Tell that the setting was changed
@ -271,7 +273,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
return jsonify({"personality":self.personality.as_dict()})
def get_all_personalities(self):
personalities_folder = Path("./personalities")
personalities_folder = lollms_path/"personalities_zoo"
personalities = {}
for language_folder in personalities_folder.iterdir():
if language_folder.is_dir():
@ -390,30 +392,32 @@ class Gpt4AllWebUI(GPT4AllAPI):
self.config["language"]=data['setting_value']
elif setting_name== "personality_language":
self.config["personality_language"]=data['setting_value']
self.personality_language=data['setting_value']
elif setting_name== "personality_category":
self.config["personality_category"]=data['setting_value']
elif setting_name== "personality":
self.config["personality"]=data['setting_value']
personality_fn = f"personalities/{self.config['personality_language']}/{self.config['personality_category']}/{self.config['personality']}"
self.personality_category=data['setting_value']
elif setting_name== "personality_folder":
self.personality_name=data['setting_value']
personality_fn = lollms_path/f"personalities_zoo/{self.personality_language}/{self.personality_category}/{self.personality_name}"
self.personality.load_personality(personality_fn)
elif setting_name== "override_personality_model_parameters":
self.config["override_personality_model_parameters"]=bool(data['setting_value'])
elif setting_name== "model":
self.config["model"]=data['setting_value']
elif setting_name== "model_name":
self.config["model_name"]=data['setting_value']
print("update_settings : New model selected")
elif setting_name== "binding":
if self.config['binding']!= data['setting_value']:
elif setting_name== "binding_name":
if self.config['binding_name']!= data['setting_value']:
print(f"New binding selected : {data['setting_value']}")
self.config["binding"]=data['setting_value']
self.config["binding_name"]=data['setting_value']
try:
self.binding = self.process.load_binding(self.config["binding"], install=True)
self.binding = self.process.load_binding(self.config["binding_name"], install=True)
except Exception as ex:
print(f"Couldn't build binding: [{ex}]")
@ -446,7 +450,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
current_drive = Path.cwd().anchor
drive_disk_usage = psutil.disk_usage(current_drive)
try:
models_folder_disk_usage = psutil.disk_usage(f'./models/{self.config["binding"]}')
models_folder_disk_usage = psutil.disk_usage(f'./models/{self.config["binding_name"]}')
return jsonify({
"total_space":drive_disk_usage.total,
"available_space":drive_disk_usage.free,
@ -464,7 +468,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
})
def list_bindings(self):
bindings_dir = Path('./bindings') # replace with the actual path to the models folder
bindings_dir = lollms_path/'bindings_zoo' # replace with the actual path to the models folder
bindings=[]
for f in bindings_dir.iterdir():
card = f/"binding_card.yaml"
@ -472,8 +476,8 @@ class Gpt4AllWebUI(GPT4AllAPI):
try:
bnd = load_config(card)
bnd["folder"]=f.stem
icon_path = Path(f/"logo.png")
if icon_path.exists():
icon_path = Path(f"bindings/{f.name}/logo.png")
if Path(lollms_path/f"bindings_zoo/{f.name}/logo.png").exists():
bnd["icon"]=str(icon_path)
bindings.append(bnd)
@ -491,18 +495,18 @@ class Gpt4AllWebUI(GPT4AllAPI):
def list_personalities_languages(self):
personalities_languages_dir = Path(f'./personalities') # replace with the actual path to the models folder
personalities_languages_dir = lollms_path/f'personalities_zoo' # replace with the actual path to the models folder
personalities_languages = [f.stem for f in personalities_languages_dir.iterdir() if f.is_dir()]
return jsonify(personalities_languages)
def list_personalities_categories(self):
personalities_categories_dir = Path(f'./personalities/{self.config["personality_language"]}') # replace with the actual path to the models folder
personalities_categories_dir = lollms_path/f'personalities_zoo/{self.config["personalities"][self.config["default_personality_id"]].split("/")[0]}' # replace with the actual path to the models folder
personalities_categories = [f.stem for f in personalities_categories_dir.iterdir() if f.is_dir()]
return jsonify(personalities_categories)
def list_personalities(self):
try:
personalities_dir = Path(f'./personalities/{self.config["personality_language"]}/{self.config["personality_category"]}') # replace with the actual path to the models folder
personalities_dir = lollms_path/f'personalities_zoo/{"/".join(self.config["personalities"][self.config["default_personality_id"]].split("/")[0:2])}' # replace with the actual path to the models folder
personalities = [f.stem for f in personalities_dir.iterdir() if f.is_dir()]
except Exception as ex:
personalities=[]
@ -567,14 +571,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
def serve_static(self, filename):
root_dir = os.getcwd()
if "use_new_ui" in self.config:
if self.config["use_new_ui"]:
path = os.path.join(root_dir, 'web/dist/')+"/".join(filename.split("/")[:-1])
else:
path = os.path.join(root_dir, 'static/')+"/".join(filename.split("/")[:-1])
else:
path = os.path.join(root_dir, 'static/')+"/".join(filename.split("/")[:-1])
path = os.path.join(root_dir, 'web/dist/')+"/".join(filename.split("/")[:-1])
fn = filename.split("/")[-1]
return send_from_directory(path, fn)
@ -587,15 +584,13 @@ class Gpt4AllWebUI(GPT4AllAPI):
return send_from_directory(path, fn)
def serve_bindings(self, filename):
root_dir = os.getcwd()
path = os.path.join(root_dir, 'bindings/')+"/".join(filename.split("/")[:-1])
path = str(lollms_path/('bindings_zoo/'+"/".join(filename.split("/")[:-1])))
fn = filename.split("/")[-1]
return send_from_directory(path, fn)
def serve_personalities(self, filename):
root_dir = os.getcwd()
path = os.path.join(root_dir, 'personalities/')+"/".join(filename.split("/")[:-1])
path = str(lollms_path/('personalities_zoo/'+"/".join(filename.split("/")[:-1])))
fn = filename.split("/")[-1]
return send_from_directory(path, fn)
@ -710,16 +705,16 @@ class Gpt4AllWebUI(GPT4AllAPI):
def set_binding(self):
data = request.get_json()
binding = str(data["binding"])
if self.config['binding']!= binding:
if self.config['binding_name']!= binding:
print("New binding selected")
self.config['binding'] = binding
self.config['binding_name'] = binding
try:
binding_ =self.process.load_binding(config["binding"],True)
binding_ =self.process.load_binding(config["binding_name"],True)
models = binding_.list_models(self.config)
if len(models)>0:
self.binding = binding_
self.config['model'] = models[0]
self.config['model_name'] = models[0]
# Build chatbot
return jsonify(self.process.set_config(self.config))
else:
@ -731,10 +726,10 @@ class Gpt4AllWebUI(GPT4AllAPI):
def set_model(self):
data = request.get_json()
model = str(data["model"])
if self.config['model']!= model:
model = str(data["model_name"])
if self.config['model_name']!= model:
print("set_model: New model selected")
self.config['model'] = model
self.config['model_name'] = model
# Build chatbot
return jsonify(self.process.set_config(self.config))
@ -743,27 +738,27 @@ class Gpt4AllWebUI(GPT4AllAPI):
def update_model_params(self):
data = request.get_json()
binding = str(data["binding"])
model = str(data["model"])
model = str(data["model_name"])
personality_language = str(data["personality_language"])
personality_category = str(data["personality_category"])
personality = str(data["personality"])
if self.config['binding']!=binding or self.config['model'] != model:
if self.config['binding_name']!=binding or self.config['model_name'] != model:
print("update_model_params: New model selected")
self.config['binding'] = binding
self.config['model'] = model
self.config['binding_name'] = binding
self.config['model_name'] = model
self.config['personality_language'] = personality_language
self.config['personality_category'] = personality_category
self.config['personality'] = personality
personality_fn = f"personalities/{self.config['personality_language']}/{self.config['personality_category']}/{self.config['personality']}"
personality_fn = lollms_path/f"personalities_zoo/{self.personality_language}/{self.personality_category}/{self.personality_name}"
print(f"Loading personality : {personality_fn}")
self.config['n_predict'] = int(data["nPredict"])
self.config['seed'] = int(data["seed"])
self.config['model'] = str(data["model"])
self.config['model_name'] = str(data["model_name"])
self.config['voice'] = str(data["voice"])
self.config['language'] = str(data["language"])
@ -773,16 +768,16 @@ class Gpt4AllWebUI(GPT4AllAPI):
self.config['repeat_penalty'] = float(data["repeatPenalty"])
self.config['repeat_last_n'] = int(data["repeatLastN"])
save_config(self.config, self.config_file_path)
self.config.save_config(self.config_file_path)
# Fixed missing argument
self.binding = self.process.rebuild_binding(self.config)
print("==============================================")
print("Parameters changed to:")
print(f"\tBinding:{self.config['binding']}")
print(f"\tModel:{self.config['model']}")
print(f"\tBinding:{self.config['binding_name']}")
print(f"\tModel:{self.config['model_name']}")
print(f"\tPersonality language:{self.config['personality_language']}")
print(f"\tPersonality category:{self.config['personality_category']}")
print(f"\tPersonality:{self.config['personality']}")
@ -826,7 +821,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
path = f'{server}{filename}'
else:
path = f'{server}/{filename}'
local_path = Path(f'./models/{self.config["binding"]}/{filename}')
local_path = Path(f'./models/{self.config["binding_name"]}/{filename}')
is_installed = local_path.exists() or model_type.lower()=="api"
models.append({
'title': filename,
@ -849,7 +844,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
def get_config(self):
return jsonify(self.config)
return jsonify(self.config.to_dict())
def main(self):
return render_template("main.html")
@ -973,7 +968,7 @@ if __name__ == "__main__":
shutil.copy(f"configs/config.yaml", f"configs/local_config.yaml")
config_file_path = f"configs/{args.config}.yaml"
config = load_config(config_file_path)
config = BindingConfig(config_file_path, Path("./models"))
if "version" not in config or int(config["version"])<int(default_config["version"]):
@ -981,7 +976,7 @@ if __name__ == "__main__":
print("Configuration file is very old. Replacing with default configuration")
config, added, removed =sync_cfg(default_config, config)
print(f"Added entries : {added}, removed entries:{removed}")
save_config(config, config_file_path)
config.save_config(config_file_path)
# Override values in config with command-line arguments
for arg_name, arg_value in vars(args).items():
@ -990,7 +985,7 @@ if __name__ == "__main__":
# executor = ThreadPoolExecutor(max_workers=1)
# app.config['executor'] = executor
bot = Gpt4AllWebUI(app, socketio, config, config_file_path)
bot = LoLLMsWebUI(app, socketio, config, config_file_path)
# chong Define custom WebSocketHandler with error handling
class CustomWebSocketHandler(WebSocketHandler):
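Across app.py, the plain dict config handled by load_config/save_config is replaced with a BindingConfig object that keeps dict-style access. Judging only from the calls visible in this diff (construction from a config path plus a models path, item get/set, membership tests, save_config, and to_dict), its surface looks roughly like the sketch below; this is inferred from usage, not the actual lollms implementation:

```python
from pathlib import Path
import yaml

class BindingConfigSketch:
    """Rough interface implied by the calls in this diff (not the real class)."""
    def __init__(self, config_file_path: str, models_path: Path):
        self.models_path = models_path
        with open(config_file_path, "r") as f:
            self._data = yaml.safe_load(f) or {}

    def __getitem__(self, key):            # config["binding_name"]
        return self._data[key]

    def __setitem__(self, key, value):     # config["model_name"] = ...
        self._data[key] = value

    def __contains__(self, key):           # "version" in config
        return key in self._data

    def save_config(self, config_file_path):
        with open(config_file_path, "w") as f:
            yaml.dump(self._data, f)

    def to_dict(self):                     # jsonify(config.to_dict())
        return dict(self._data)
```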


@ -1 +0,0 @@
config_local.yaml


@ -1,134 +0,0 @@
######
# Project : GPT4ALL-UI
# File : binding.py
# Author : ParisNeo with the help of the community
# Underlying binding : None (binding template)
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui bindings.
# This is a template binding you can copy to build your own binding
######
from pathlib import Path
from typing import Callable
from api.binding import LLMBinding
import yaml
from api.config import load_config
import re
__author__ = "parisneo"
__github__ = "https://github.com/ParisNeo/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
binding_name = "CustomBinding"
class CustomBinding(LLMBinding):
# Define what is the extension of the model files supported by your binding
# Only applicable for local models for remote models like gpt4 and others, you can keep it empty
# and reimplement your own list_models method
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a LLAMACPP binding
Args:
config (dict): The configuration file
"""
super().__init__(config, False)
# The local config can be used to store personal information that shouldn't be shared, like a ChatGPT key
# or other personal information
# This file is never committed to the repository as it is ignored by .gitignore
# You can remove this if you don't need custom local configurations
self._local_config_file_path = Path(__file__).parent/"config_local.yaml"
self.config = load_config(self._local_config_file_path)
# Do your initialization stuff
def tokenize(self, prompt):
"""
Tokenizes the given prompt using the model's tokenizer.
Args:
prompt (str): The input prompt to be tokenized.
Returns:
list: A list of tokens representing the tokenized prompt.
"""
return None
def detokenize(self, tokens_list):
"""
Detokenizes the given list of tokens using the model's tokenizer.
Args:
tokens_list (list): A list of tokens to be detokenized.
Returns:
str: The detokenized text as a string.
"""
return None
def generate(self,
prompt:str,
n_predict: int = 128,
new_text_callback: Callable[[str], None] = bool,
verbose: bool = False,
**gpt_params ):
"""Generates text out of a prompt
Args:
prompt (str): The prompt to use for generation
n_predict (int, optional): Number of tokens to predict. Defaults to 128.
new_text_callback (Callable[[str], None], optional): A callback function that is called every time a new text element is generated. Defaults to None.
verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
"""
try:
output = ""
count = 0
generated_text = """
This is an empty binding that shows how you can build your own binding.
Find it in bindings.
```python
# This is a python snippet
print("Hello World")
```
This is a photo
![](/images/icon.png)
"""
for tok in re.split(r'(\s+)', generated_text):
if count >= n_predict:
break
word = tok
if new_text_callback is not None:
if not new_text_callback(word):
break
output += word
count += 1
except Exception as ex:
print(ex)
return output
# Uncomment if you want to build a custom model listing
#@staticmethod
#def list_models(config:dict):
# """Lists the models for this binding
# """
# models_dir = Path('./models')/config["binding"] # replace with the actual path to the models folder
# return [f.name for f in models_dir.glob(LLMBinding.file_extension)]
#
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)
return yaml_data
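The template's generate streams words through new_text_callback and stops early when the callback returns False. A hypothetical caller (the config path and prompt are placeholders):

```python
from api.config import load_config

config = load_config("configs/local_config.yaml")  # placeholder path
binding = CustomBinding(config)  # the template class defined above

chunks = []
def on_word(word: str) -> bool:
    chunks.append(word)
    return True  # return False to stop generation early

output = binding.generate("Hello", n_predict=32, new_text_callback=on_word)
print(output)  # same text as "".join(chunks)
```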


@ -1,5 +0,0 @@
name: This is a binding template (Not usable)
author: the binding author
version: the binding version
link: Link to the binding repository
description: the binding description


@ -1,61 +0,0 @@
import subprocess
from pathlib import Path
import requests
from tqdm import tqdm
from api.config import save_config
class Install:
def __init__(self, api):
# Get the current directory
current_dir = Path(__file__).resolve().parent
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- Template binding -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
# Example of installing PyTorch
"""
try:
print("Checking pytorch")
import torch
import torchvision
if torch.cuda.is_available():
print("CUDA is supported.")
else:
print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
self.reinstall_pytorch_with_cuda()
except Exception as ex:
self.reinstall_pytorch_with_cuda()
"""
# Step 2: Install dependencies using pip from requirements.txt
requirements_file = current_dir / "requirements.txt"
subprocess.run(["pip", "install", "--upgrade", "--no-cache-dir", "-r", str(requirements_file)])
# Create the models folder
models_folder = Path(f"./models/{Path(__file__).parent.stem}")
models_folder.mkdir(exist_ok=True, parents=True)
# The local config can be used to store personal information that shouldn't be shared, like a ChatGPT key
# or other personal information
# This file is never committed to the repository as it is ignored by .gitignore
# You can remove this if you don't need custom local configurations
self._local_config_file_path = Path(__file__).parent/"config_local.yaml"
if not self._local_config_file_path.exists():
config = {
#Put your default configurations here
}
save_config(config, self._local_config_file_path)
# Create the install file (a marker used to ensure the installation completed correctly)
with open(install_file,"w") as f:
f.write("ok")
print("Installed successfully")
def reinstall_pytorch_with_cuda(self):
"""Installs pytorch with cuda (if you have a gpu)
"""
subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])

Binary file not shown (before: 237 KiB)


@ -1,8 +0,0 @@
- description: The description of the model
filename: the file to be loaded
license: The licence
owner_link: https://link_to_the_owner_web_page
owner: Owner_name
server: https://Path_to_the_server_to_load_from
sha256: The Hash code of the file
model_type: api # api or file


@ -1,143 +0,0 @@
######
# Project : GPT4ALL-UI
# File : binding.py
# Author : ParisNeo with the help of the community
# Underlying binding : marella's ctransformers binding
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui bindings.
# This binding is a wrapper to marella's binding
######
from pathlib import Path
from typing import Callable
from api.binding import LLMBinding
import yaml
from ctransformers import AutoModelForCausalLM
__author__ = "parisneo"
__github__ = "https://github.com/ParisNeo/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
binding_name = "CTRansformers"
class CTRansformers(LLMBinding):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a LLAMACPP binding
Args:
config (dict): The configuration file
"""
super().__init__(config, False)
if 'gpt2' in self.config['model']:
model_type='gpt2'
elif 'gptj' in self.config['model']:
model_type='gptj'
elif 'gpt_neox' in self.config['model']:
model_type='gpt_neox'
elif 'dolly-v2' in self.config['model']:
model_type='dolly-v2'
elif 'starcoder' in self.config['model']:
model_type='starcoder'
elif 'llama' in self.config['model'].lower() or 'wizardlm' in self.config['model'].lower() or 'vigogne' in self.config['model'].lower():
model_type='llama'
elif 'mpt' in self.config['model']:
model_type='mpt'
else:
print("The model you are using is not supported by this binding")
return
if self.config["use_avx2"]:
self.model = AutoModelForCausalLM.from_pretrained(
f"./models/c_transformers/{self.config['model']}", model_type=model_type
)
else:
self.model = AutoModelForCausalLM.from_pretrained(
f"./models/c_transformers/{self.config['model']}", model_type=model_type, lib = "avx"
)
def tokenize(self, prompt):
"""
Tokenizes the given prompt using the model's tokenizer.
Args:
prompt (str): The input prompt to be tokenized.
Returns:
list: A list of tokens representing the tokenized prompt.
"""
return self.model.tokenize(prompt.encode())
def detokenize(self, tokens_list):
"""
Detokenizes the given list of tokens using the model's tokenizer.
Args:
tokens_list (list): A list of tokens to be detokenized.
Returns:
str: The detokenized text as a string.
"""
return self.model.detokenize(tokens_list)
def generate(self,
prompt:str,
n_predict: int = 128,
new_text_callback: Callable[[str], None] = bool,
verbose: bool = False,
**gpt_params ):
"""Generates text out of a prompt
Args:
prompt (str): The prompt to use for generation
n_predict (int, optional): Number of tokens to predict. Defaults to 128.
new_text_callback (Callable[[str], None], optional): A callback function that is called every time a new text element is generated. Defaults to None.
verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
"""
try:
output = ""
#self.model.reset()
tokens = self.model.tokenize(prompt)
count = 0
for tok in self.model.generate(
tokens,
top_k=gpt_params['top_k'],
top_p=gpt_params['top_p'],
temperature=gpt_params['temperature'],
repetition_penalty=gpt_params['repeat_penalty'],
seed=self.config['seed'],
batch_size=1,
threads = self.config['n_threads'],
reset=True,
):
if count >= n_predict or self.model.is_eos_token(tok):
break
word = self.model.detokenize(tok)
if new_text_callback is not None:
if not new_text_callback(word):
break
output += word
count += 1
except Exception as ex:
print(ex)
return output
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)
return yaml_data
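The elif chain in CTRansformers.__init__ maps model file name substrings to ctransformers model types. The same mapping written as data, as a sketch only (unlike the original, this version lowercases every name, where the chain above lowercases only the llama/wizardlm/vigogne checks):

```python
# Substring of the model file name -> ctransformers model_type
MODEL_TYPE_BY_SUBSTRING = [
    ("gpt2", "gpt2"),
    ("gptj", "gptj"),
    ("gpt_neox", "gpt_neox"),
    ("dolly-v2", "dolly-v2"),
    ("starcoder", "starcoder"),
    ("llama", "llama"),
    ("wizardlm", "llama"),
    ("vigogne", "llama"),
    ("mpt", "mpt"),
]

def infer_model_type(model_name: str):
    name = model_name.lower()
    for substring, model_type in MODEL_TYPE_BY_SUBSTRING:
        if substring in name:
            return model_type
    return None  # unsupported model
```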


@ -1,15 +0,0 @@
name: C Transformer
author: marella
version: 1.0
link: https://github.com/marella/ctransformers
description: 'Python bindings for the Transformer models implemented in C/C++ using GGML library.
Supported models (model: model_type):
GPT-2: gpt2
GPT-J, GPT4All-J: gptj
GPT-NeoX, StableLM: gpt_neox
LLaMA: llama
MPT: mpt
Dolly V2: dolly-v2
StarCoder: starcoder
'


@ -1,45 +0,0 @@
import subprocess
from pathlib import Path
import requests
from tqdm import tqdm
class Install:
def __init__(self, api):
# Get the current directory
current_dir = Path(__file__).resolve().parent
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- cTransformers binding -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
"""
try:
print("Checking pytorch")
import torch
import torchvision
if torch.cuda.is_available():
print("CUDA is supported.")
else:
print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
self.reinstall_pytorch_with_cuda()
except Exception as ex:
self.reinstall_pytorch_with_cuda()
"""
# Step 2: Install dependencies using pip from requirements.txt
requirements_file = current_dir / "requirements.txt"
subprocess.run(["pip", "install", "--upgrade", "--no-cache-dir", "-r", str(requirements_file)])
# Create the models folder
models_folder = Path("./models/c_transformers")
models_folder.mkdir(exist_ok=True, parents=True)
#Create the install file
with open(install_file,"w") as f:
f.write("ok")
print("Installed successfully")
def reinstall_pytorch_with_cuda(self):
subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])

Binary file not shown (before: 268 KiB)


@ -1,67 +0,0 @@
- LLAMA: 'true'
description: GGML format model files for the original LLaMa
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: llama-7b.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke/
owner: TheBloke
server: https://huggingface.co/TheBloke/LLaMa-7B-GGML/resolve/main/
sha256: ec2f2d1f0dfb73b72a4cbac7fa121abbe04c37ab327125a38248f930c0f09ddf
- LLAMA: 'true'
description: GGML format model files for Wizard LM 7B model
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: wizardLM-7B.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke/
owner: TheBloke
server: https://huggingface.co/TheBloke/wizardLM-7B-GGML/resolve/main/
sha256: ea35e30a7c140485b856d0919284ce59e4ca47c1b8af037ea8b7ba05ef291c43
- LLAMA: 'true'
description: GGML format model files for Wizard LM 7B model
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: koala-7b.ggml.unquantized.pr613.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke/
owner: TheBloke
server: https://huggingface.co/TheBloke/koala-7b-ggml-unquantized/resolve/main/
sha256: c478ceced3b38800cb768225b1e759a32c9e89bd33606fb38eeff3b811e28371
- MPT-7B: 'true'
description: MPT-7B
filename: mpt-7b.ggmlv3.q5_1.bin
license: Apache-2.0
owner_link: https://huggingface.co/TheBloke/
owner: TheBloke
server: https://huggingface.co/TheBloke/MPT-7B-GGML/resolve/main/
sha256: c947c38405921a199c603fed2ed63486811777ba370bb51c40c3132e5cfe84e5
- MPT-7B-Instruct: 'true'
description: MPT-7B-Instruct
filename: mpt-7b-instruct.ggmlv3.q5_1.bin
license: Apache-2.0
owner_link: https://huggingface.co/TheBloke/
owner: TheBloke
server: https://huggingface.co/TheBloke/MPT-7B-Instruct-GGML/resolve/main/
sha256: a4d17a39ac277d48a3d55aa74b36a4e6e1b891b58937a838243fad549d26c686
- MPT-7B-Storywriter: 'true'
description: MPT-7B-Storywriter 4 Bits
filename: mpt-7b-storywriter.ggmlv3.q4_0.bin
license: Apache-2.0
owner_link: https://huggingface.co/TheBloke/
owner: TheBloke
server: https://huggingface.co/TheBloke/MPT-7B-Storywriter-GGML/resolve/main/
sha256: 357a536464982987e49fb2660fe3f3f53226eaa047f42b31f04d21629aab94fb
- MPT-7B-Storywriter: 'true'
description: MPT-7B-Storywriter 5.1 Bits
filename: mpt-7b-storywriter.ggmlv3.q5_1.bin
license: Apache-2.0
owner_link: https://huggingface.co/TheBloke/
owner: TheBloke
server: https://huggingface.co/TheBloke/MPT-7B-Storywriter-GGML/resolve/main/
sha256: 3b7dd7aa7508cc8cb4e262fe4b93214826f38d18d04059075e05837457f5402
- description: Vigogne Instruct 13B - A French instruction-following LLaMa model GGML
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: Vigogne-Instruct-13B.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/Vigogne-Instruct-13B-GGML/resolve/main/
sha256: 14bcd51fbc09bcc456c79f9f0e4d070ad536e9d9e03811232e037b62580fe5e7


@ -1,3 +0,0 @@
ctransformers
transformers
accelerate


@ -1 +0,0 @@
pdf_file_path: ''


@ -1,53 +0,0 @@
import argparse
import yaml
from urllib.parse import urlparse
from pathlib import Path
def process_yaml(input_file):
# Read YAML file
with open(input_file, 'r') as file:
models = yaml.safe_load(file)
# Process each model entry
for model in models:
server_url = model['server']
parsed_url = urlparse(server_url)
if not 'owner' in model:
if 'huggingface.co' in parsed_url.netloc:
# Hugging Face URL, extract owner from server URL
model['owner'] = parsed_url.path.split('/')[1]
else:
# Non-Hugging Face URL, use domain name as owner
model['owner'] = parsed_url.netloc
# Add additional fields
if not 'link' in model:
model['link'] = server_url
if not 'license' in model:
model['license'] = 'Non commercial'
# Save processed YAML file
output_file = input_file.stem + '_processed.yaml'
with open(output_file, 'w') as file:
yaml.dump(models, file)
print(f"Processed YAML file saved as {output_file}")
def main():
# Parse command-line arguments
parser = argparse.ArgumentParser(description='Process YAML file')
parser.add_argument('input_file', type=str, help='Input YAML file')
args = parser.parse_args()
input_file = Path(args.input_file)
if not input_file.exists():
print('Input file does not exist.')
return
process_yaml(input_file)
if __name__ == '__main__':
main()
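The owner inference keys off the server URL's host: Hugging Face URLs take the first path segment as the owner, everything else falls back to the domain name. A standalone check of that urlparse logic (the URLs are examples drawn from the model lists in this commit):

```python
from urllib.parse import urlparse

def infer_owner(server_url: str) -> str:
    parsed = urlparse(server_url)
    if "huggingface.co" in parsed.netloc:
        # Hugging Face URL: the owner is the first path segment
        return parsed.path.split("/")[1]
    return parsed.netloc  # otherwise, use the domain name

print(infer_owner("https://huggingface.co/TheBloke/MPT-7B-GGML/resolve/main/"))  # TheBloke
print(infer_owner("https://gpt4all.io/models/"))  # gpt4all.io
```

Given an input file such as models.yaml, the script writes its result next to it as models_processed.yaml.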


@ -1,120 +0,0 @@
######
# Project : GPT4ALL-UI
# File : binding.py
# Author : ParisNeo with the help of the community
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui bindings.
# This binding is a wrapper to gpt4all's official binding
# Follow him on his github project : https://github.com/ParisNeo/gpt4all
######
from pathlib import Path
from typing import Callable
from gpt4all import GPT4All
from api.binding import LLMBinding
import yaml
__author__ = "parisneo"
__github__ = "https://github.com/ParisNeo/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
binding_name = "GPT4ALL"
from gpt4all import GPT4All
class GPT4ALL(LLMBinding):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a GPT4ALL binding
Args:
config (dict): The configuration file
"""
super().__init__(config, False)
self.model = GPT4All.get_model_from_name(self.config['model'])
self.model.load_model(
model_path=f"./models/gpt_4all/{self.config['model']}"
)
def tokenize(self, prompt):
"""
Tokenizes the given prompt using the model's tokenizer.
Args:
prompt (str): The input prompt to be tokenized.
Returns:
list: A list of tokens representing the tokenized prompt.
"""
return None
def detokenize(self, tokens_list):
"""
Detokenizes the given list of tokens using the model's tokenizer.
Args:
tokens_list (list): A list of tokens to be detokenized.
Returns:
str: The detokenized text as a string.
"""
return None
def generate(self,
prompt:str,
n_predict: int = 128,
new_text_callback: Callable[[str], None] = bool,
verbose: bool = False,
**gpt_params ):
"""Generates text out of a prompt
Args:
prompt (str): The prompt to use for generation
n_predict (int, optional): Number of tokens to predict. Defaults to 128.
new_text_callback (Callable[[str], None], optional): A callback function that is called every time a new text element is generated. Defaults to None.
verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
"""
try:
response_text = []
def local_callback(token_id, response):
decoded_word = response.decode('utf-8')
response_text.append( decoded_word )
if new_text_callback is not None:
if not new_text_callback(decoded_word):
return False
# Do whatever you want with decoded_token here.
return True
self.model._response_callback = local_callback
self.model.generate(prompt,
n_predict=n_predict,
temp=gpt_params["temperature"],
top_k=gpt_params['top_k'],
top_p=gpt_params['top_p'],
repeat_penalty=gpt_params['repeat_penalty'],
repeat_last_n = self.config['repeat_last_n'],
# n_threads=self.config['n_threads'],
streaming=False,
)
except Exception as ex:
print(ex)
return ''.join(response_text)
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)
return yaml_data


@ -1,6 +0,0 @@
name: gpt4all
author: Nomic-ai
version: 1.0
link: https://github.com/nomic-ai/gpt4all
description:
Python bindings for the gpt4all models by Nomic-AI


@ -1,46 +0,0 @@
import subprocess
from pathlib import Path
import requests
from tqdm import tqdm
class Install:
def __init__(self, api):
# Get the current directory
current_dir = Path(__file__).resolve().parent
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- GPT4All binding by nomic-ai -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
"""
try:
print("Checking pytorch")
import torch
import torchvision
if torch.cuda.is_available():
print("CUDA is supported.")
else:
print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
self.reinstall_pytorch_with_cuda()
except Exception as ex:
self.reinstall_pytorch_with_cuda()
"""
# Step 2: Install dependencies using pip from requirements.txt
requirements_file = current_dir / "requirements.txt"
subprocess.run(["pip", "install", "--no-cache-dir", "-r", str(requirements_file)])
# Create the models folder
models_folder = Path("./models/gpt_4all")
models_folder.mkdir(exist_ok=True, parents=True)
#Create the install file
with open(install_file,"w") as f:
f.write("ok")
print("Installed successfully")
def reinstall_pytorch_with_cuda(self):
subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])


@ -1,27 +0,0 @@
import json
import yaml
from pathlib import Path
import argparse
def json_to_yaml(json_file):
# Read JSON file
with open(json_file, 'r') as file:
json_data = json.load(file)
# Create YAML file path
yaml_file = Path(json_file).with_suffix('.yaml')
# Convert JSON to YAML
with open(yaml_file, 'w') as file:
yaml.dump(json_data, file)
print(f"Conversion complete. YAML file saved as: {yaml_file}")
if __name__ == "__main__":
# Parse command-line arguments
parser = argparse.ArgumentParser(description='Convert JSON file to YAML.')
parser.add_argument('json_file', help='Path to the JSON file')
args = parser.parse_args()
# Convert JSON to YAML
json_to_yaml(args.json_file)
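A hypothetical run of the converter, with a placeholder input file name; pointing it at a JSON models list like the one further below writes a sibling .yaml file:

```python
from pathlib import Path

json_to_yaml("models.json")             # json_to_yaml is defined above
assert Path("models.yaml").exists()     # output lands next to the input
```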

Binary file not shown (before: 22 KiB)


@ -1,101 +0,0 @@
[
{
"md5sum": "81a09a0ddf89690372fc296ff7f625af",
"filename": "ggml-gpt4all-j-v1.3-groovy.bin",
"server":"https://gpt4all.io/models/",
"filesize": "3785248281",
"isDefault": "true",
"bestGPTJ": "true",
"description": "Current best commercially licensable model based on GPT-J and trained by Nomic AI on the latest curated GPT4All dataset."
},
{
"md5sum": "91f886b68fbce697e9a3cd501951e455",
"filename": "ggml-gpt4all-l13b-snoozy.bin",
"server":"https://gpt4all.io/models/",
"filesize": "8136770688",
"bestLlama": "true",
"description": "Current best non-commercially licensable model based on Llama 13b and trained by Nomic AI on the latest curated GPT4All dataset."
},
{
"md5sum": "756249d3d6abe23bde3b1ae272628640",
"filename": "ggml-mpt-7b-chat.bin",
"server":"https://gpt4all.io/models/",
"filesize": "4854401050",
"isDefault": "true",
"bestMPT": "true",
"requires": "2.4.1",
"description": "Current best non-commercially licensable chat model based on MPT and trained by Mosaic ML."
},
{
"md5sum": "879344aaa9d62fdccbda0be7a09e7976",
"filename": "ggml-gpt4all-j-v1.2-jazzy.bin",
"server":"https://gpt4all.io/models/",
"filesize": "3785248281",
"description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v2 GPT4All dataset."
},
{
"md5sum": "61d48a82cb188cceb14ebb8082bfec37",
"filename": "ggml-gpt4all-j-v1.1-breezy.bin",
"server":"https://gpt4all.io/models/",
"filesize": "3785248281",
"description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v1 GPT4All dataset."
},
{
"md5sum": "5b5a3f9b858d33b29b52b89692415595",
"filename": "ggml-gpt4all-j.bin",
"server":"https://gpt4all.io/models/",
"filesize": "3785248281",
"description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v0 GPT4All dataset."
},
{
"md5sum": "29119f8fa11712704c6b22ac5ab792ea",
"filename": "ggml-vicuna-7b-1.1-q4_2.bin",
"server":"https://gpt4all.io/models/",
"filesize": "4212859520",
"description": "A non-commercially licensable model based on Llama 7b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego."
},
{
"md5sum": "95999b7b0699e2070af63bf5d34101a8",
"filename": "ggml-vicuna-13b-1.1-q4_2.bin",
"server":"https://gpt4all.io/models/",
"filesize": "8136770688",
"description": "A non-commercially licensable model based on Llama 13b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego."
},
{
"md5sum": "99e6d129745a3f1fb1121abed747b05a",
"filename": "ggml-wizardLM-7B.q4_2.bin",
"server":"https://gpt4all.io/models/",
"filesize": "4212864640",
"description": "A non-commercially licensable model based on Llama 7b and trained by Microsoft and Peking University."
},
{
"md5sum": "6cb4ee297537c9133bddab9692879de0",
"filename": "ggml-stable-vicuna-13B.q4_2.bin",
"server":"https://gpt4all.io/models/",
"filesize": "8136777088",
"description": "A non-commercially licensable model based on Llama 13b and RLHF trained by Stable AI."
},
{
"md5sum": "120c32a51d020066288df045ef5d52b9",
"filename": "ggml-mpt-7b-base.bin",
"server":"https://gpt4all.io/models/",
"filesize": "4854401028",
"requires": "2.4.1",
"description": "A commercially licensable model base pre-trained by Mosaic ML."
},
{
"md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe",
"filename": "ggml-nous-gpt4-vicuna-13b.bin",
"server":"https://gpt4all.io/models/",
"filesize": "8136777088",
"description": "A non-commercially licensable model based on Vicuna 13b, fine-tuned on ~180,000 instructions, trained by Nous Research."
},
{
"md5sum": "1cfa4958f489f0a0d1ffdf6b37322809",
"filename": "ggml-mpt-7b-instruct.bin",
"server":"https://gpt4all.io/models/",
"filesize": "4854401028",
"requires": "2.4.1",
"description": "A commericially licensable instruct model based on MPT and trained by Mosaic ML."
}
]


@ -1,142 +0,0 @@
- bestMPT: 'true'
icon: https://cdn-1.webcatalog.io/catalog/mosaicml/mosaicml-icon-filled-256.png?v=1675590559063
description: Current best non-commercially licensable chat model based on MPT and
trained by Mosaic ML.
filename: ggml-mpt-7b-chat.bin
filesize: '4854401050'
isDefault: 'true'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 756249d3d6abe23bde3b1ae272628640
owner: Nomic AI
requires: 2.4.1
server: https://gpt4all.io/models/
- description: A commercially licensable instruct model based on MPT and trained
by Mosaic ML.
icon: https://cdn-1.webcatalog.io/catalog/mosaicml/mosaicml-icon-filled-256.png?v=1675590559063
filename: ggml-mpt-7b-instruct.bin
filesize: '4854401028'
license: Apache 2.0
owner_link: https://gpt4all.io
md5sum: 1cfa4958f489f0a0d1ffdf6b37322809
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A commercially licensable model base pre-trained by Mosaic ML.
icon: https://cdn-1.webcatalog.io/catalog/mosaicml/mosaicml-icon-filled-256.png?v=1675590559063
filename: ggml-mpt-7b-base.bin
filesize: '4854401028'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 120c32a51d020066288df045ef5d52b9
owner: Nomic AI
requires: 2.4.1
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 7b and trained by
teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
icon: https://cdn-1.webcatalog.io/catalog/mosaicml/mosaicml-icon-filled-256.png?v=1675590559063
filename: ggml-vicuna-7b-1.1-q4_2.bin
filesize: '4212859520'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 29119f8fa11712704c6b22ac5ab792ea
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 7b and trained by
Microsoft and Peking University.
filename: ggml-wizardLM-7B.q4_2.bin
filesize: '4212864640'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 99e6d129745a3f1fb1121abed747b05a
owner: Nomic AI
server: https://gpt4all.io/models/
- md5sum: 679fc463f01388ea2d339664af0a0836
filename: ggml-wizard-13b-uncensored.bin
server: https://gpt4all.io/models/
filesize: 8136777088
owner: Nomic AI
owner_link: https://gpt4all.io
description: A non-commercially licensable model based on Wizard Vicuna 13b.
- md5sum: d5eafd5b0bd0d615cfd5fd763f642dfe
filename: ggml-nous-gpt4-vicuna-13b.bin
server: https://gpt4all.io/models/
filesize: 8136777088
description: A non-commercially licensable model based on Vicuna 13b, fine-tuned on ~180,000 instructions, trained by Nous Research.
owner: Nomic AI
owner_link: https://gpt4all.io
- bestGPTJ: 'true'
description: Current best commercially licensable model based on GPT-J and trained
by Nomic AI on the latest curated GPT4All dataset.
filename: ggml-gpt4all-j-v1.3-groovy.bin
filesize: '3785248281'
isDefault: 'true'
license: Apache 2.0
owner_link: https://gpt4all.io
md5sum: 81a09a0ddf89690372fc296ff7f625af
owner: Nomic AI
server: https://gpt4all.io/models/
- bestLlama: 'true'
description: Current best non-commercially licensable model based on Llama 13b and
trained by Nomic AI on the latest curated GPT4All dataset.
filename: ggml-gpt4all-l13b-snoozy.bin
filesize: '8136770688'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 91f886b68fbce697e9a3cd501951e455
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v2 GPT4All dataset.
filename: ggml-gpt4all-j-v1.2-jazzy.bin
filesize: '3785248281'
license: Apache 2.0
owner_link: https://gpt4all.io
md5sum: 879344aaa9d62fdccbda0be7a09e7976
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v1 GPT4All dataset.
filename: ggml-gpt4all-j-v1.1-breezy.bin
filesize: '3785248281'
license: Apache 2.0
owner_link: https://gpt4all.io
md5sum: 61d48a82cb188cceb14ebb8082bfec37
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v0 GPT4All dataset.
filename: ggml-gpt4all-j.bin
filesize: '3785248281'
license: Apache 2.0
owner_link: https://gpt4all.io
md5sum: 5b5a3f9b858d33b29b52b89692415595
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 13b and trained
by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
filename: ggml-vicuna-13b-1.1-q4_2.bin
filesize: '8136770688'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 95999b7b0699e2070af63bf5d34101a8
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 13b and RLHF trained
by Stable AI.
filename: ggml-stable-vicuna-13B.q4_2.bin
filesize: '8136777088'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 6cb4ee297537c9133bddab9692879de0
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Vicuna 13b, fine-tuned
on ~180,000 instructions, trained by Nous Research.
filename: ggml-nous-gpt4-vicuna-13b.bin
filesize: '8136777088'
license: Non commercial
owner_link: https://gpt4all.io/models/
md5sum: d5eafd5b0bd0d615cfd5fd763f642dfe
owner: gpt4all.io
server: https://gpt4all.io/models/


@ -1 +0,0 @@
gpt4all


@ -1,96 +0,0 @@
######
# Project : GPT4ALL-UI
# File : binding.py
# Author : ParisNeo with the help of the community
# Underlying binding : Abdeladim's pygptj binding
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui bindings.
# This binding is a wrapper to abdeladim's binding
# Follow him on his github project : https://github.com/abdeladim-s/pygptj
######
from pathlib import Path
from typing import Callable
from pygptj.model import Model
from api.binding import LLMBinding
__author__ = "parisneo"
__github__ = "https://github.com/ParisNeo/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
binding_name = "GptJ"
class GptJ(LLMBinding):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a LLAMACPP binding
Args:
config (dict): The configuration file
"""
super().__init__(config, False)
self.model = Model(
model_path=f"./models/gpt_j/{self.config['model']}",
prompt_context="", prompt_prefix="", prompt_suffix=""
)
def tokenize(self, prompt):
"""
Tokenizes the given prompt using the model's tokenizer.
Args:
prompt (str): The input prompt to be tokenized.
Returns:
list: A list of tokens representing the tokenized prompt.
"""
return None
def detokenize(self, tokens_list):
"""
Detokenizes the given list of tokens using the model's tokenizer.
Args:
tokens_list (list): A list of tokens to be detokenized.
Returns:
str: The detokenized text as a string.
"""
return None
def generate(self,
prompt:str,
n_predict: int = 128,
new_text_callback: Callable[[str], None] = bool,
verbose: bool = False,
**gpt_params ):
"""Generates text out of a prompt
Args:
prompt (str): The prompt to use for generation
n_predict (int, optional): Number of tokens to predict. Defaults to 128.
new_text_callback (Callable[[str], None], optional): A callback function that is called every time a new text element is generated. Defaults to None.
verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
"""
try:
self.model.reset()
output = ""
for tok in self.model.generate(prompt,
n_predict=n_predict,
temp=gpt_params["temperature"],
top_k=gpt_params['top_k'],
top_p=gpt_params['top_p'],
#repeat_penalty=gpt_params['repeat_penalty'],
#repeat_last_n = self.config['repeat_last_n'],
n_threads=self.config['n_threads'],
):
output += tok
if new_text_callback is not None:
if not new_text_callback(tok):
return output
except Exception as ex:
print(ex)
return output


@ -1,43 +0,0 @@
import subprocess
from pathlib import Path
import requests
from tqdm import tqdm
class Install:
def __init__(self, api):
# Get the current directory
current_dir = Path(__file__).resolve().parent
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- GPTj binding by abdeladim -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
try:
print("Checking pytorch")
import torch
import torchvision
if torch.cuda.is_available():
print("CUDA is supported.")
else:
print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
self.reinstall_pytorch_with_cuda()
except Exception as ex:
self.reinstall_pytorch_with_cuda()
# Step 2: Install dependencies using pip from requirements.txt
requirements_file = current_dir / "requirements.txt"
subprocess.run(["pip", "install", "--no-cache-dir", "-r", str(requirements_file)])
# Create the models folder
models_folder = Path("./models/c_transformers")
models_folder.mkdir(exist_ok=True, parents=True)
#Create the install file
with open(install_file,"w") as f:
f.write("ok")
print("Installed successfully")
def reinstall_pytorch_with_cuda(self):
subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])


@ -1,100 +0,0 @@
- bestGPTJ: 'true'
owner: Nomic AI
owner_link: https://gpt4all.io
description: Current best commercially licensable model based on GPT-J and trained
by Nomic AI on the latest curated GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-gpt4all-j-v1.3-groovy.bin
filesize: '3785248281'
isDefault: 'true'
md5sum: 81a09a0ddf89690372fc296ff7f625af
server: https://gpt4all.io/models/
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v2 GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-gpt4all-j-v1.2-jazzy.bin
filesize: '3785248281'
md5sum: 879344aaa9d62fdccbda0be7a09e7976
server: https://gpt4all.io/models/
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v1 GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-gpt4all-j-v1.1-breezy.bin
filesize: '3785248281'
md5sum: 61d48a82cb188cceb14ebb8082bfec37
server: https://gpt4all.io/models/
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v0 GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-gpt4all-j.bin
filesize: '3785248281'
md5sum: 5b5a3f9b858d33b29b52b89692415595
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 7b and trained by
teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-vicuna-7b-1.1-q4_2.bin
filesize: '4212859520'
md5sum: 29119f8fa11712704c6b22ac5ab792ea
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 13b and trained
by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-vicuna-13b-1.1-q4_2.bin
filesize: '8136770688'
md5sum: 95999b7b0699e2070af63bf5d34101a8
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 7b and trained by
Microsoft and Peking University.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-wizardLM-7B.q4_2.bin
filesize: '4212864640'
md5sum: 99e6d129745a3f1fb1121abed747b05a
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 13b and RLHF trained
by Stable AI.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-stable-vicuna-13B.q4_2.bin
filesize: '8136777088'
md5sum: 6cb4ee297537c9133bddab9692879de0
server: https://gpt4all.io/models/
- description: A commercially licensable model base pre-trained by Mosaic ML.
owner: Nomic AI
icon: https://gpt4all.io/gpt4all-128.png
owner_link: https://gpt4all.io
filename: ggml-mpt-7b-base.bin
filesize: '4854401028'
md5sum: 120c32a51d020066288df045ef5d52b9
requires: 2.4.1
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Vicuna 13b, fine-tuned
on ~180,000 instructions, trained by Nous Research.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-nous-gpt4-vicuna-13b.bin
filesize: '8136777088'
md5sum: d5eafd5b0bd0d615cfd5fd763f642dfe
server: https://gpt4all.io/models/
- description: A commercially licensable instruct model based on MPT and trained
by Mosaic ML.
icon: https://gpt4all.io/gpt4all-128.png
owner: Nomic AI
owner_link: https://gpt4all.io
filename: ggml-mpt-7b-instruct.bin
filesize: '4854401028'
md5sum: 1cfa4958f489f0a0d1ffdf6b37322809
requires: 2.4.1
server: https://gpt4all.io/models/


@ -1 +0,0 @@
pygptj


@ -1,110 +0,0 @@
######
# Project : GPT4ALL-UI
# File : binding.py
# Author : ParisNeo with the help of the community
# Underlying binding : Marella's gpt4all-j binding
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui bindings.
# This binding is a wrapper around marella's gpt4all-j binding
# Follow him on his github project : https://github.com/marella/gpt4all-j
######
from pathlib import Path
from typing import Callable
from gpt4allj import Model
from api.binding import LLMBinding
import yaml
__author__ = "parisneo"
__github__ = "https://github.com/ParisNeo/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
binding_name = "GPTJ"
class GPTJ(LLMBinding):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a LLAMACPP binding
Args:
config (dict): The configuration file
"""
super().__init__(config, False)
self.model = Model(
model=f"./models/llama_cpp/{self.config['model']}", avx2 = self.config["use_avx2"]
)
def tokenize(self, prompt):
"""
Tokenizes the given prompt using the model's tokenizer.
Args:
prompt (str): The input prompt to be tokenized.
Returns:
list: A list of tokens representing the tokenized prompt.
"""
return None
def detokenize(self, tokens_list):
"""
Detokenizes the given list of tokens using the model's tokenizer.
Args:
tokens_list (list): A list of tokens to be detokenized.
Returns:
str: The detokenized text as a string.
"""
return None
def generate(self,
prompt:str,
n_predict: int = 128,
new_text_callback: Callable[[str], None] = bool,
verbose: bool = False,
**gpt_params ):
"""Generates text out of a prompt
Args:
prompt (str): The prompt to use for generation
n_predict (int, optional): Number of tokens to predict. Defaults to 128.
new_text_callback (Callable[[str], None], optional): A callback function that is called every time a new text element is generated. Defaults to None.
verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
"""
try:
self.model.reset()
output = ""
for tok in self.model.generate(
prompt,
seed=self.config['seed'],
n_threads=self.config['n_threads'],
n_predict=n_predict,
top_k=gpt_params['top_k'],
top_p=gpt_params['top_p'],
temp=gpt_params["temperature"],
repeat_penalty=gpt_params['repeat_penalty'],
repeat_last_n=self.config['repeat_last_n'],
n_batch=8,
reset=True,
):
output += tok
if new_text_callback is not None:
if not new_text_callback(tok):
return output
except Exception as ex:
print(ex)
return output
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)
return yaml_data
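A minimal usage sketch for the class above, assuming a config dict carrying the keys it actually reads (model, use_avx2, seed, n_threads, repeat_last_n) and a model file in the folder hard-coded in __init__; all values are illustrative:

config = {
    "model": "ggml-gpt4all-j-v1.3-groovy.bin",
    "use_avx2": True,
    "seed": -1,
    "n_threads": 8,
    "repeat_last_n": 40,
}
binding = GPTJ(config)

def on_token(tok: str) -> bool:
    print(tok, end="", flush=True)
    return True  # returning False stops the generation loop early

text = binding.generate("Once upon a time", n_predict=64, new_text_callback=on_token,
                        top_k=50, top_p=0.95, temperature=0.9, repeat_penalty=1.2)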


@ -1,42 +0,0 @@
import subprocess
from pathlib import Path
import requests
from tqdm import tqdm
class Install:
def __init__(self, api):
# Get the current directory
current_dir = Path(__file__).resolve().parent
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- GPTj binding by marella -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
try:
print("Checking pytorch")
import torch
import torchvision
if torch.cuda.is_available():
print("CUDA is supported.")
else:
print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
self.reinstall_pytorch_with_cuda()
except Exception as ex:
self.reinstall_pytorch_with_cuda()
# Step 2: Install dependencies using pip from requirements.txt
requirements_file = current_dir / "requirements.txt"
subprocess.run(["pip", "install", "--no-cache-dir", "-r", str(requirements_file)])
# Create the models folder
models_folder = Path("./models/c_transformers")
models_folder.mkdir(exist_ok=True, parents=True)
# Create the install file
with open(install_file,"w") as f:
f.write("ok")
print("Installed successfully")
def reinstall_pytorch_with_cuda(self):
subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])


@ -1,85 +0,0 @@
- bestGPTJ: 'true'
description: Current best commercially licensable model based on GPT-J and trained
by Nomic AI on the latest curated GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-gpt4all-j-v1.3-groovy.bin
filesize: '3785248281'
isDefault: 'true'
md5sum: 81a09a0ddf89690372fc296ff7f625af
- bestLlama: 'true'
description: Current best non-commercially licensable model based on Llama 13b and
trained by Nomic AI on the latest curated GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-gpt4all-l13b-snoozy.bin
filesize: '8136770688'
md5sum: 91f886b68fbce697e9a3cd501951e455
- bestMPT: 'true'
description: Current best non-commercially licensable chat model based on MPT and
trained by Mosaic ML.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-mpt-7b-chat.bin
filesize: '4854401050'
isDefault: 'true'
md5sum: 756249d3d6abe23bde3b1ae272628640
requires: 2.4.1
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v2 GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-gpt4all-j-v1.2-jazzy.bin
filesize: '3785248281'
md5sum: 879344aaa9d62fdccbda0be7a09e7976
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v1 GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-gpt4all-j-v1.1-breezy.bin
filesize: '3785248281'
md5sum: 61d48a82cb188cceb14ebb8082bfec37
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v0 GPT4All dataset.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-gpt4all-j.bin
filesize: '3785248281'
md5sum: 5b5a3f9b858d33b29b52b89692415595
- description: A non-commercially licensable model based on Llama 7b and trained by
teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-vicuna-7b-1.1-q4_2.bin
filesize: '4212859520'
md5sum: 29119f8fa11712704c6b22ac5ab792ea
- description: A non-commercially licensable model based on Llama 13b and trained
by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-vicuna-13b-1.1-q4_2.bin
filesize: '8136770688'
md5sum: 95999b7b0699e2070af63bf5d34101a8
- description: A non-commercially licensable model based on Llama 7b and trained by
Microsoft and Peking University.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-wizardLM-7B.q4_2.bin
filesize: '4212864640'
md5sum: 99e6d129745a3f1fb1121abed747b05a
- description: A non-commercially licensable model based on Llama 13b and RLHF trained
by Stable AI.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-stable-vicuna-13B.q4_2.bin
filesize: '8136777088'
md5sum: 6cb4ee297537c9133bddab9692879de0
- description: A commercially licensable model base pre-trained by Mosaic ML.
filename: ggml-mpt-7b-base.bin
icon: https://gpt4all.io/gpt4all-128.png
filesize: '4854401028'
md5sum: 120c32a51d020066288df045ef5d52b9
requires: 2.4.1
- description: A non-commercially licensable model based on Vicuna 13b, fine-tuned
on ~180,000 instructions, trained by Nous Research.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-nous-gpt4-vicuna-13b.bin
filesize: '8136777088'
md5sum: d5eafd5b0bd0d615cfd5fd763f642dfe
- description: A commericially licensable instruct model based on MPT and trained
by Mosaic ML.
icon: https://gpt4all.io/gpt4all-128.png
filename: ggml-mpt-7b-instruct.bin
filesize: '4854401028'
md5sum: 1cfa4958f489f0a0d1ffdf6b37322809
requires: 2.4.1


@ -1 +0,0 @@
gpt4all-j


@ -1 +0,0 @@
config_local.yaml


@ -1,185 +0,0 @@
######
# Project : GPT4ALL-UI
# File : binding.py
# Author : ParisNeo with the help of the community
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui bindings.
######
from pathlib import Path
from typing import Callable
from transformers import AutoTokenizer, TextGenerationPipeline
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
from api.binding import LLMBinding
import torch
import yaml
import requests
from tqdm import tqdm
import os
import requests
from tqdm import tqdm
from bs4 import BeautifulSoup
import concurrent.futures
import wget
__author__ = "parisneo"
__github__ = "https://github.com/ParisNeo/GPTQ_binding"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
binding_name = "GPTQ"
class GPTQ(LLMBinding):
file_extension='*'
def __init__(self, config:dict) -> None:
"""Builds a GPTQ binding
Args:
config (dict): The configuration file
"""
super().__init__(config, False)
self.model_dir = f'{config["model"]}'
pretrained_model_dir = "facebook/opt-125m"
quantized_model_dir = "opt-125m-4bit"
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
# load quantized model to the first GPU
self.model = AutoGPTQForCausalLM.from_quantized(self.model_dir)
def tokenize(self, prompt):
"""
Tokenizes the given prompt using the model's tokenizer.
Args:
prompt (str): The input prompt to be tokenized.
Returns:
list: A list of tokens representing the tokenized prompt.
"""
return self.tokenizer.tokenize(prompt)
def detokenize(self, tokens_list):
"""
Detokenizes the given list of tokens using the model's tokenizer.
Args:
tokens_list (list): A list of tokens to be detokenized.
Returns:
str: The detokenized text as a string.
"""
return self.tokenizer.decode(tokens_list)
def generate(self,
prompt:str,
n_predict: int = 128,
new_text_callback: Callable[[str], None] = bool,
verbose: bool = False,
**gpt_params ):
"""Generates text out of a prompt
Args:
prompt (str): The prompt to use for generation
n_predict (int, optional): Number of tokens to predict. Defaults to 128.
new_text_callback (Callable[[str], None], optional): A callback function that is called every time a new text element is generated. Defaults to None.
verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
"""
try:
output = ""
tok = self.tokenizer.decode(self.model.generate(**self.tokenizer(prompt, return_tensors="pt").to("cuda:0"))[0])
if new_text_callback is not None:
new_text_callback(tok)
output = tok
"""
self.model.reset()
for tok in self.model.generate(prompt,
n_predict=n_predict,
temp=self.config['temp'],
top_k=self.config['top_k'],
top_p=self.config['top_p'],
repeat_penalty=self.config['repeat_penalty'],
repeat_last_n = self.config['repeat_last_n'],
n_threads=self.config['n_threads'],
):
if not new_text_callback(tok):
return
"""
except Exception as ex:
print(ex)
return output
def download_model(self, repo, base_folder, installation_path, callback=None):
"""
Downloads a folder from a Hugging Face repository URL, reports the download progress using a callback function,
and displays a progress bar.
Args:
repo (str): The name of the Hugging Face repository.
base_folder (str): The base folder where the repository should be saved.
installation_path (str): The path where the folder should be saved.
callback (function, optional): A callback function to be called during the download
with the progress percentage as an argument. Defaults to None.
"""
dont_download = [".gitattributes"]
url = f"https://huggingface.co/{repo}/tree/main"
response = requests.get(url)
html_content = response.text
soup = BeautifulSoup(html_content, 'html.parser')
file_names = []
for a_tag in soup.find_all('a', {'class': 'group'}):
span_tag = a_tag.find('span', {'class': 'truncate'})
if span_tag:
file_name = span_tag.text
if file_name not in dont_download:
file_names.append(file_name)
print(f"Repo: {repo}")
print("Found files:")
for file in file_names:
print(" ", file)
dest_dir = Path(base_folder) / repo.replace("/", "_")
dest_dir.mkdir(parents=True, exist_ok=True)
os.chdir(dest_dir)
def download_file(get_file):
filename = f"https://huggingface.co/{repo}/resolve/main/{get_file}"
print(f"\nDownloading {filename}")
wget.download(filename, out=str(dest_dir), bar=callback)
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(download_file, file_names)
os.chdir(base_folder)
installation_path = Path(installation_path)
installation_path.parent.mkdir(parents=True, exist_ok=True)
dest_dir.rename(installation_path)
print("Done")
@staticmethod
def list_models(config:dict):
"""Lists the models for this binding
"""
return [
"EleutherAI/gpt-j-6b",
"opt-125m-4bit"
"TheBloke/medalpaca-13B-GPTQ-4bit",
"TheBloke/stable-vicuna-13B-GPTQ",
]
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)
return yaml_data
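Note that download_model above discovers the file list by scraping the repository's HTML tree page with BeautifulSoup, so it is sensitive to Hugging Face layout changes (the hub's JSON API would be sturdier). A minimal usage sketch, given an instance of the class above called binding; repo and paths are illustrative:

binding.download_model(
    repo="TheBloke/stable-vicuna-13B-GPTQ",
    base_folder="models/gptq",
    installation_path="models/gptq/stable-vicuna-13B-GPTQ",
    callback=None,  # or a wget-style progress bar callback
)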


@ -1,6 +0,0 @@
name: AutoGPTQ
author: PanQiWei
version: 1.0
link: https://github.com/PanQiWei/AutoGPTQ
description: 'Python bindings for the GPTQ type of files
'


@ -1,61 +0,0 @@
import subprocess
from pathlib import Path
import requests
from tqdm import tqdm
from api.config import save_config
class Install:
def __init__(self, api):
# Get the current directory
current_dir = Path(__file__).resolve().parent
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- GPTQ binding -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
# Example of installing PyTorch
try:
print("Checking pytorch")
import torch
import torchvision
if torch.cuda.is_available():
print("CUDA is supported.")
else:
print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
self.reinstall_pytorch_with_cuda()
except Exception as ex:
self.reinstall_pytorch_with_cuda()
# Step 2: Install dependencies using pip from requirements.txt
requirements_file = current_dir / "requirements.txt"
subprocess.run(["pip", "install", "--upgrade", "--no-cache-dir", "-r", str(requirements_file)])
# Create the models folder
models_folder = Path(f"./models/{Path(__file__).parent.stem}")
models_folder.mkdir(exist_ok=True, parents=True)
# The local config can be used to store personal information that shouldn't be shared, like a ChatGPT key
# or other personal information
# This file is never committed to the repository as it is ignored by .gitignore
# You can remove this if you don't need custom local configurations
"""
self._local_config_file_path = Path(__file__).parent/"config_local.yaml"
if not self._local_config_file_path.exists():
config = {
#Put your default configurations here
}
save_config(config, self._local_config_file_path)
"""
# Create the install file (a file that is used to ensure the installation was done correctly)
with open(install_file,"w") as f:
f.write("ok")
print("Installed successfully")
def reinstall_pytorch_with_cuda(self):
"""Installs pytorch with cuda (if you have a gpu)
"""
subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])

Binary file not shown (image, 357 KiB).


@ -1,8 +0,0 @@
- description: GGML format model files for the original LLaMa
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: Samantha-7B-GPTQ
license: Non commercial
owner_link: https://huggingface.co/TheBloke/
owner: TheBloke
server: Samantha-7B-GPTQ
sha256: ec2f2d1f0dfb73b72a4cbac7fa121abbe04c37ab327125a38248f930c0f09ddf


@ -1 +0,0 @@
auto-gptq


@ -1,117 +0,0 @@
######
# Project : GPT4ALL-UI
# File : binding.py
# Author : ParisNeo with the help of the community
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui bindings.
# This binding is a wrapper around the official llamacpp python bindings
# Follow him on his github project : https://github.com/abetlen/llama-cpp-python
######
from pathlib import Path
from typing import Callable
from llama_cpp import Llama
from api.binding import LLMBinding
import yaml
import random
__author__ = "parisneo"
__github__ = "https://github.com/ParisNeo/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
binding_name = "LLAMACPP"
class LLAMACPP(LLMBinding):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a LLAMACPP binding
Args:
config (dict): The configuration file
"""
super().__init__(config, False)
seed = config["seed"]
# if seed <=0:
# seed = random.randint(1, 2**31)
if not "n_gpu_layers" in self.config:
self.config["n_gpu_layers"] = 20
self.model = Llama(model_path=f"./models/llama_cpp_official/{self.config['model']}", n_ctx=self.config["ctx_size"], n_gpu_layers=self.config["n_gpu_layers"], seed=seed)
def tokenize(self, prompt):
"""
Tokenizes the given prompt using the model's tokenizer.
Args:
prompt (str): The input prompt to be tokenized.
Returns:
list: A list of tokens representing the tokenized prompt.
"""
return self.model.tokenize(prompt.encode())
def detokenize(self, tokens_list):
"""
Detokenizes the given list of tokens using the model's tokenizer.
Args:
tokens_list (list): A list of tokens to be detokenized.
Returns:
str: The detokenized text as a string.
"""
return self.model.detokenize(tokens_list).decode()
def generate(self,
prompt:str,
n_predict: int = 128,
new_text_callback: Callable[[str], None] = bool,
verbose: bool = False,
**gpt_params ):
"""Generates text out of a prompt
Args:
prompt (str): The prompt to use for generation
n_predict (int, optional): Number of tokens to predict. Defaults to 128.
new_text_callback (Callable[[str], None], optional): A callback function that is called every time a new text element is generated. Defaults to None.
verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
"""
try:
self.model.reset()
output = ""
tokens = self.model.tokenize(prompt.encode())
count = 0
for tok in self.model.generate(tokens,
temp=gpt_params["temperature"],
top_k=gpt_params['top_k'],
top_p=gpt_params['top_p'],
repeat_penalty=gpt_params['repeat_penalty'],
):
if count >= n_predict or (tok == self.model.token_eos()):
break
word = self.model.detokenize([tok]).decode()
if new_text_callback is not None:
if not new_text_callback(word):
break
output += word
count += 1
except Exception as ex:
print(ex)
return output
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)
return yaml_data
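A minimal usage sketch for the class above, assuming the config keys it reads (model, ctx_size, seed, n_gpu_layers) and a model file under the hard-coded models/llama_cpp_official folder; values are illustrative:

config = {
    "model": "Manticore-13B.ggmlv3.q4_0.bin",
    "ctx_size": 2048,
    "seed": -1,
    "n_gpu_layers": 20,
}
binding = LLAMACPP(config)

def on_word(word: str) -> bool:
    print(word, end="", flush=True)
    return True  # returning False would break out of the token loop

text = binding.generate("### Human: Hello\n### Assistant:", n_predict=128,
                        new_text_callback=on_word,
                        temperature=0.9, top_k=50, top_p=0.95, repeat_penalty=1.2)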


@ -1,11 +0,0 @@
name: Official llamacpp binding
author: abetlen
version: 1.0
link: https://github.com/abetlen/llama-cpp-python
description: 'Simple Python bindings for @ggerganov''s llama.cpp library. This package provides:
Low-level access to C API via ctypes interface.
High-level Python API for text completion
OpenAI-like API
LangChain compatibility
Documentation is available at https://abetlen.github.io/llama-cpp-python.
'


@ -1,42 +0,0 @@
import subprocess
from pathlib import Path
import requests
from tqdm import tqdm
class Install:
def __init__(self, api):
# Get the current directory
current_dir = Path(__file__).resolve().parent
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- llama_cpp_official binding -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
try:
print("Checking pytorch")
import torch
import torchvision
if torch.cuda.is_available():
print("CUDA is supported.")
else:
print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
self.reinstall_pytorch_with_cuda()
except Exception as ex:
self.reinstall_pytorch_with_cuda()
# Step 2: Install dependencies using pip from requirements.txt
requirements_file = current_dir / "requirements.txt"
subprocess.run(["pip", "install", "--no-cache-dir", "-r", str(requirements_file)])
# Create the models folder
models_folder = Path("./models/c_transformers")
models_folder.mkdir(exist_ok=True, parents=True)
# Create the install file
with open(install_file,"w") as f:
f.write("ok")
print("Installed successfully")
def reinstall_pytorch_with_cuda(self):
subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])

Binary file not shown (image, 164 KiB).


@ -1,80 +0,0 @@
- bestLlama: 'true'
description: The official OpenAssistant 30B model, finally here
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: OpenAssistant-SFT-7-Llama-30B.ggml.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/OpenAssistant-SFT-7-Llama-30B-GGML/resolve/main/
sha256: 32fd44c685fbf429810db593e2db8aa42a7e1be2cd3571b6005d53b029acfcf5
- bestLlama: 'true'
description: 'Manticore-13B'
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: Manticore-13B.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/Manticore-13B-GGML/resolve/main/
sha256: 910f3e73dc5797753313a950989c54a30342780311d64c3d4b8a37b12dd50336
- bestLlama: 'true'
description: 'Project-Baize Quantized on 4 bits '
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: baize-v2-13b.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/Project-Baize-v2-13B-GGML/resolve/main/
sha256: 5994f92f3cc8d3fe2d09a44c174ed8c0f4f32819597feaafc9d6bd06208d3df6
- bestLlama: 'true'
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
description: 'MedAlpaca 13B Quantized on 4 bits: model specifically fine-tuned for medical domain tasks'
filename: medalpaca-13B.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/medalpaca-13B-GGML/resolve/main/
sha256: 24060342f7649f5609a90932b88604f90eb34ee490f7c22403ff47b9b8547c58
- bestLlama: 'true'
description: 'MedAlpaca 13B Quantized on 5 bits: model specifically fine-tuned for medical domain tasks'
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: medalpaca-13B.ggmlv3.q5_1.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/medalpaca-13B-GGML/resolve/main/
sha256: eeae440fd56fe8cb05632f22d73725914c6ef803dbb468c3ab6d4d2a78ff9ad5
- bestLlama: 'true'
description: 'Wizard-Vicuna-13B-Uncensored-GGML Quantized on 4 bits'
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: Wizard-Vicuna-13B-Uncensored.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-GGML/resolve/main/
sha256: 1f08b147a5bce41cfcbb3fd5d51ba765dea1786e15b5655ab69ba3a337a893b7
- description: Koala 7B model produced at Berkeley
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: koala-7B.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/koala-7B-GGML/resolve/previous_llama_ggmlv2/
sha256: 14bcd51fbc09bcc456c79f9f0e4d070ad536e9d9e03811232e037b62580fe5e7
- description: Vigogne Instruct 13B - A French instruction-following LLaMa model GGML
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: Vigogne-Instruct-13B.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/Vigogne-Instruct-13B-GGML/resolve/main/
sha256: 14bcd51fbc09bcc456c79f9f0e4d070ad536e9d9e03811232e037b62580fe5e7
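Unlike the gpt4all zoo earlier in this commit, entries here carry a sha256 instead of an md5sum, and the download URL is simply server + filename. A small checking sketch, not part of the repo (the local path is illustrative):

import hashlib
from pathlib import Path

def sha256_of(path: Path) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()

# e.g. for the Manticore entry above:
url = "https://huggingface.co/TheBloke/Manticore-13B-GGML/resolve/main/" + "Manticore-13B.ggmlv3.q4_0.bin"
# assert sha256_of(Path("models/llama_cpp_official/Manticore-13B.ggmlv3.q4_0.bin")) == \
#        "910f3e73dc5797753313a950989c54a30342780311d64c3d4b8a37b12dd50336"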


@ -1 +0,0 @@
llama-cpp-python==0.1.53


@ -1 +0,0 @@
config_local.yaml


@ -1,123 +0,0 @@
######
# Project : GPT4ALL-UI
# File : binding.py
# Author : ParisNeo with the help of the community
# Underlying service : OpenAI's API
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui bindings.
# This binding is a wrapper around OpenAI's API
######
from pathlib import Path
from typing import Callable
from api.binding import LLMBinding
from api.config import load_config
import openai
import yaml
import re
__author__ = "parisneo"
__github__ = "https://github.com/ParisNeo/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
binding_name = "OpenAIGPT"
class OpenAIGPT(LLMBinding):
# Define what is the extension of the model files supported by your binding
# Only applicable for local models for remote models like gpt4 and others, you can keep it empty
# and reimplement your own list_models method
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a OpenAIGPT binding
Args:
config (dict): The configuration file
"""
super().__init__(config, False)
# The local config can be used to store personal information that shouldn't be shared, like an OpenAI key
# or other personal information
# This file is never committed to the repository as it is ignored by .gitignore
self.config = config
self._local_config_file_path = Path(__file__).parent/"config_local.yaml"
self.local_config = load_config(self._local_config_file_path)
openai.api_key = self.local_config["openai_key"]
# Do your initialization stuff
def tokenize(self, prompt):
"""
Tokenizes the given prompt using the model's tokenizer.
Args:
prompt (str): The input prompt to be tokenized.
Returns:
list: A list of tokens representing the tokenized prompt.
"""
return None
def detokenize(self, tokens_list):
"""
Detokenizes the given list of tokens using the model's tokenizer.
Args:
tokens_list (list): A list of tokens to be detokenized.
Returns:
str: The detokenized text as a string.
"""
return None
def generate(self,
prompt:str,
n_predict: int = 128,
new_text_callback: Callable[[str], None] = bool,
verbose: bool = False,
**gpt_params ):
"""Generates text out of a prompt
Args:
prompt (str): The prompt to use for generation
n_predict (int, optional): Number of tokens to predict. Defaults to 128.
new_text_callback (Callable[[str], None], optional): A callback function that is called every time a new text element is generated. Defaults to None.
verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
"""
try:
response = openai.Completion.create(
engine=self.config["model"], # Choose the engine according to your OpenAI plan
prompt=prompt,
max_tokens=n_predict, # Adjust the desired length of the generated response
n=1, # Specify the number of responses you want
stop=None, # Define a stop sequence if needed
temperature=gpt_params["temperature"] # Adjust the temperature for more or less randomness in the output
)
# Extract the generated reply from the API response
reply = response.choices[0].text.strip()
return reply
except Exception as ex:
print(ex)
return ""
@staticmethod
def list_models(config:dict):
"""Lists the models for this binding
"""
return ["ChatGpt by Open AI"]
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)
return yaml_data
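A minimal usage sketch for the class above. It assumes config_local.yaml already holds the key (the installer below writes it) and that the pre-1.0 openai package is installed, since openai.Completion.create is that package's legacy completions API:

config = {"model": "text-davinci-003"}  # engine name from the models.yaml below
binding = OpenAIGPT(config)
reply = binding.generate("Say hello in French.", n_predict=32, temperature=0.7)
print(reply)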


@ -1,5 +0,0 @@
name: The original GPT
author: Open AI
version: 1.0
link: https://openai.com/
description: This binding uses the OpenAI API to give you access to GPT-4 and other OpenAI models


@ -1,62 +0,0 @@
import subprocess
from pathlib import Path
import requests
from tqdm import tqdm
from api.config import save_config
import yaml
class Install:
def __init__(self, api):
# Get the current directory
current_dir = Path(__file__).resolve().parent
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- OpenAI Binding -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
# Step 2: Install dependencies using pip from requirements.txt
requirements_file = current_dir / "requirements.txt"
subprocess.run(["pip", "install", "--upgrade", "--no-cache-dir", "-r", str(requirements_file)])
# Create the models folder
models_folder = Path(f"./models/{Path(__file__).parent.stem}")
models_folder.mkdir(exist_ok=True, parents=True)
# Create the local config file that stores the OpenAI key
self._local_config_file_path = Path(__file__).parent/"config_local.yaml"
if not self._local_config_file_path.exists():
key = input("Please enter your OpenAI API key: ")
config={
"openai_key":key
}
self.config = save_config(config, self._local_config_file_path)
# Create the install file (a file that is used to ensure the installation was done correctly)
with open(install_file,"w") as f:
f.write("ok")
print("Installed successfully")
def reinstall_pytorch_with_cuda(self):
"""Installs pytorch with cuda (if you have a gpu)
"""
subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])
def create_config_file(self):
"""
Create a config_local.yaml file with predefined data.
The function creates a config_local.yaml file with the specified data. The file is saved in the parent directory
of the current file.
Args:
None
Returns:
None
"""
data = {
"pdf_file_path": "" # Path to the PDF that will be discussed
}
path = Path(__file__).parent.parent / 'config_local.yaml'
with open(path, 'w') as file:
yaml.dump(data, file)

Binary file not shown (image, 6.1 KiB).


@ -1,39 +0,0 @@
- description: Most advanced language model by OpenAI based on GPT-3 architecture, offering powerful language generation capabilities.
icon: /bindings/open_ai/logo.png
filename: gpt-3.5-turbo
license: Commercial
owner_link: https://link_to_the_owner_web_page
owner: Open AI
server: https://openai.com
sha256: ~
model_type: api
- description: Highly capable language model for generating high-quality text and performing various natural language processing tasks.
icon: /bindings/open_ai/logo.png
filename: text-davinci-003
license: Commercial
owner_link: https://link_to_the_owner_web_page
owner: Open AI
server: https://openai.com
sha256: ~
model_type: api
- description: Earlier version of the text-davinci model, offering similar functionality with potentially slightly different performance characteristics.
icon: /bindings/open_ai/logo.png
filename: text-davinci-002
license: Commercial
owner_link: https://link_to_the_owner_web_page
owner: Open AI
server: https://openai.com
sha256: ~
model_type: api
- description: Original version of the text-davinci model, providing strong language generation capabilities.
icon: /bindings/open_ai/logo.png
filename: text-davinci-001
license: Commercial
owner_link: https://link_to_the_owner_web_page
owner: Open AI
server: https://openai.com
sha256: ~
model_type: api
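All four entries above are remote API models (model_type: api, and sha256: ~, which YAML loads as None), so there is nothing to download or checksum. A small filtering sketch, not part of the repo:

import yaml

with open("models.yaml") as f:
    entries = yaml.safe_load(f)

api_models = [e["filename"] for e in entries if e.get("model_type") == "api"]
local_models = [e["filename"] for e in entries if e.get("model_type") != "api"]
# api_models -> ['gpt-3.5-turbo', 'text-davinci-003', 'text-davinci-002', 'text-davinci-001']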


@ -1,2 +0,0 @@
openai
tiktoken


@ -1,110 +0,0 @@
######
# Project : GPT4ALL-UI
# File : binding.py
# Author : ParisNeo with the help of the community
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui bindings.
# This binding is a wrapper around abdeladim's binding
# Follow him on his github project : https://github.com/abdeladim-s/pyllamacpp
######
from pathlib import Path
from typing import Callable
from pyllamacpp.model import Model
from api.binding import LLMBinding
import yaml
__author__ = "parisneo"
__github__ = "https://github.com/ParisNeo/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
binding_name = "PyLLAMACPP"
class PyLLAMACPP(LLMBinding):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a LLAMACPP binding
Args:
config (dict): The configuration file
"""
super().__init__(config, False)
self.model = Model(
model_path=f"./models/py_llama_cpp/{self.config['model']}",
prompt_context="", prompt_prefix="", prompt_suffix="",
n_ctx=self.config['ctx_size'],
seed=self.config['seed'],
)
def tokenize(self, prompt):
"""
Tokenizes the given prompt using the model's tokenizer.
Args:
prompt (str): The input prompt to be tokenized.
Returns:
list: A list of tokens representing the tokenized prompt.
"""
return self.model.tokenize(prompt)
def detokenize(self, tokens_list):
"""
Detokenizes the given list of tokens using the model's tokenizer.
Args:
tokens_list (list): A list of tokens to be detokenized.
Returns:
str: The detokenized text as a string.
"""
return self.model.detokenize(tokens_list)
def generate(self,
prompt:str,
n_predict: int = 128,
new_text_callback: Callable[[str], None] = bool,
verbose: bool = False,
**gpt_params ):
"""Generates text out of a prompt
Args:
prompt (str): The prompt to use for generation
n_predict (int, optional): Number of tokens to predict. Defaults to 128.
new_text_callback (Callable[[str], None], optional): A callback function that is called every time a new text element is generated. Defaults to None.
verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
"""
try:
self.model.reset()
output = ""
for tok in self.model.generate(prompt,
n_predict=n_predict,
temp=gpt_params['temperature'],
top_k=gpt_params['top_k'],
top_p=gpt_params['top_p'],
repeat_penalty=gpt_params['repeat_penalty'],
repeat_last_n = self.config['repeat_last_n'],
n_threads=self.config['n_threads'],
):
output += tok
if new_text_callback is not None:
if not new_text_callback(tok):
return output
except Exception as ex:
print(ex)
return output
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)
return yaml_data
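Unlike the GPTJ and OpenAI bindings earlier in this commit, this one actually implements tokenize/detokenize, so a caller can measure prompts in tokens. A minimal sketch with illustrative config values:

config = {
    "model": "ggml-model.bin",  # expected under ./models/py_llama_cpp/
    "ctx_size": 2048,
    "seed": -1,
    "repeat_last_n": 40,
    "n_threads": 8,
}
binding = PyLLAMACPP(config)
tokens = binding.tokenize("### Human: Hello")
print(len(tokens), "prompt tokens")
print(binding.detokenize(tokens))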


@ -1,13 +0,0 @@
name: PyLLamaCpp
author: abdeladim
version: 1.0
link: https://github.com/abdeladim-s/pyllamacpp
description: 'Python bindings for llama.cpp
For those who don''t know, llama.cpp is a port of Facebook''s LLaMA model in pure C/C++:
Without dependencies
Apple silicon first-class citizen - optimized via ARM NEON
AVX2 support for x86 architectures
Mixed F16 / F32 precision
4-bit quantization support
Runs on the CPU
'


@ -1,42 +0,0 @@
import subprocess
from pathlib import Path
import requests
from tqdm import tqdm
class Install:
def __init__(self, api):
# Get the current directory
current_dir = Path(__file__).resolve().parent
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- pyllamacpp binding by abdeladim -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
try:
print("Checking pytorch")
import torch
import torchvision
if torch.cuda.is_available():
print("CUDA is supported.")
else:
print("CUDA is not supported. Reinstalling PyTorch with CUDA support.")
self.reinstall_pytorch_with_cuda()
except Exception as ex:
self.reinstall_pytorch_with_cuda()
# Step 2: Install dependencies using pip from requirements.txt
requirements_file = current_dir / "requirements.txt"
subprocess.run(["pip", "install", "--no-cache-dir", "-r", str(requirements_file)])
# Create the models folder
models_folder = Path("./models/c_transformers")
models_folder.mkdir(exist_ok=True, parents=True)
# Create the install file
with open(install_file,"w") as f:
f.write("ok")
print("Installed successfully")
def reinstall_pytorch_with_cuda(self):
subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])

Binary file not shown (image, 237 KiB).


@ -1,80 +0,0 @@
- bestLlama: 'true'
description: The official OpenAssistant 30B model, finally here
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: OpenAssistant-SFT-7-Llama-30B.ggml.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/OpenAssistant-SFT-7-Llama-30B-GGML/resolve/main/
sha256: 32fd44c685fbf429810db593e2db8aa42a7e1be2cd3571b6005d53b029acfcf5
- bestLlama: 'true'
description: 'Manticore-13B'
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: Manticore-13B.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/Manticore-13B-GGML/resolve/main/
sha256: 910f3e73dc5797753313a950989c54a30342780311d64c3d4b8a37b12dd50336
- bestLlama: 'true'
description: 'Project-Baize Quantized on 4 bits '
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: baize-v2-13b.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/Project-Baize-v2-13B-GGML/resolve/main/
sha256: 5994f92f3cc8d3fe2d09a44c174ed8c0f4f32819597feaafc9d6bd06208d3df6
- bestLlama: 'true'
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
description: 'MedAlpaca 13B Quantized on 4 bits: model specifically fine-tuned for medical domain tasks'
filename: medalpaca-13B.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/medalpaca-13B-GGML/resolve/main/
sha256: 24060342f7649f5609a90932b88604f90eb34ee490f7c22403ff47b9b8547c58
- bestLlama: 'true'
description: 'MedAlpaca 13B Quantized on 5 bits: model specifically fine-tuned for medical domain tasks'
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: medalpaca-13B.ggmlv3.q5_1.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/medalpaca-13B-GGML/resolve/main/
sha256: eeae440fd56fe8cb05632f22d73725914c6ef803dbb468c3ab6d4d2a78ff9ad5
- bestLlama: 'true'
description: 'Wizard-Vicuna-13B-Uncensored-GGML Quantized on 4 bits'
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: Wizard-Vicuna-13B-Uncensored.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-GGML/resolve/main/
sha256: 1f08b147a5bce41cfcbb3fd5d51ba765dea1786e15b5655ab69ba3a337a893b7
- description: Koala 7B model produced at Berkeley
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: koala-7B.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/koala-7B-GGML/resolve/previous_llama_ggmlv2/
sha256: 14bcd51fbc09bcc456c79f9f0e4d070ad536e9d9e03811232e037b62580fe5e7
- description: Vigogne Instruct 13B - A French instruction-following LLaMa model GGML
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: Vigogne-Instruct-13B.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
owner: TheBloke
server: https://huggingface.co/TheBloke/Vigogne-Instruct-13B-GGML/resolve/main/
sha256: 14bcd51fbc09bcc456c79f9f0e4d070ad536e9d9e03811232e037b62580fe5e7


@ -1 +0,0 @@
pyllamacpp


@ -1,31 +1,31 @@
# =================== Lord Of Large Language Models Configuration file ===========================
version: 5
user_name: user
config: default
ctx_size: 2048
n_gpu_layers: 20 #Depends on your GPU size
db_path: databases/database.db
debug: false
n_threads: 8
binding_name: llama_cpp_official
model_name: Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_0.bin
# Host information
host: localhost
language: en-US
# Supported bindings are llamacpp and gpt-j
binding: gpt_4all
model: null
n_predict: 1024
nb_messages_to_remember: 5
personality_language: english
personality_category: default
personality: gpt4all
port: 9600
repeat_last_n: 40
repeat_penalty: 1.2
# Generation parameters
seed: -1
n_predict: 1024
ctx_size: 2048
temperature: 0.9
top_k: 50
top_p: 0.95
voice: ""
use_gpu: false # Not active yet
auto_read: false
use_avx2: true # AVX2 is required by default; set this to false if your CPU doesn't support it
use_new_ui: true # If true, the new UI is used instead of the old one
repeat_last_n: 40
repeat_penalty: 1.2
n_threads: 8
#Personality parameters
personalities: ["english/generic/lollms"]
default_personality_id: 0
override_personality_model_parameters: false # if true, the personality parameters are overridden by those of the configuration (may affect personality behaviour)
user_name: user
# UI parameters
debug: false
db_path: databases/database.db
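Because this hunk is rendered without +/- markers, old and new lines are interleaved above, which is why keys such as repeat_last_n, n_predict, ctx_size, user_name, db_path and debug appear twice. As for what a loader would actually keep: PyYAML resolves duplicate keys by letting the last occurrence win, as this quick check shows:

import yaml

doc = "repeat_last_n: 40\nrepeat_last_n: 50\n"
print(yaml.safe_load(doc))  # {'repeat_last_n': 50}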

Binary file not shown (image, 37 KiB).


@ -1,73 +0,0 @@
# PyAIPersonality chatbot conditioning file
# Author : @ParisNeo
# Version : 1.0
# Description :
# An NLP model needs conditioning to instruct it to be whatever we want it to be.
# This file is used by the GPT4All web ui to condition the personality of the model you are
# talking to.
#The version of the PyAIPersonality used to build this file
pyaipersonality_version: 0.0.5
#The version of the personality
version: 1.0.0
# Name of the personality
name: gpt4all
# Name of the user
user_name: user
# Language (see the list of supported languages here : https://github.com/ParisNeo/GPT4All_Personalities/blob/main/README.md)
language: "en_XX"
# Category
category: "General"
# Personality description:
personality_description: |
This personality is a helpful and Kind AI ready to help you solve your problems
# The conditioning instructions sent to the model at the start of the discussion
personality_conditioning: |
## Information:
Assistant's name is gpt4all
Today's date is {{date}}
## Instructions:
Your mission is to assist the user in performing various tasks and to answer their questions
#Welcome message to be sent to the user when a new discussion is started
welcome_message: |
Welcome! My name is gpt4all.
How can I help you today?
# This prefix is added at the beginning of any message input by the user
user_message_prefix: "### Human:
"
# A text to put between user and chatbot messages
link_text: "\n"
# This prefix is added at the beginning of any message output by the ai
ai_message_prefix: "### Assistant:
"
# Here is the list of extensions this personality requires
dependencies: []
# A list of texts to be used to detect that the model is hallucinating and stop the generation if any one of these is output by the model
anti_prompts: ["###Human","###Assistant","### Human","### Assistant", "Human:", "Human:", "Assistant:"]
# Some personalities need a disclaimer to warn the user of potential harm that can be caused by the AI
# for example, for medical assistants, it is important to tell the user to be careful and not use medication
# without advice from a real doctor.
disclaimer: ""
# Here are default model parameters
model_temperature: 0.6 # higher: more creative, lower more deterministic
model_n_predicts: 1024 # higher: generates more words, lower: generates fewer
model_top_k: 50
model_top_p: 0.90
model_repeat_penalty: 1.0
model_repeat_last_n: 40
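A minimal sketch, not part of the repo, of how a loader can expand the {{date}} placeholder used in personality_conditioning above and assemble the opening prompt (the file name and date format are assumptions):

from datetime import datetime
import yaml

with open("config.yaml") as f:  # the personality's conditioning file
    pers = yaml.safe_load(f)

conditioning = pers["personality_conditioning"].replace(
    "{{date}}", datetime.now().strftime("%d/%m/%Y"))
opening = conditioning + pers["link_text"] + pers["welcome_message"]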


@ -8,4 +8,5 @@ markdown
gevent
gevent-websocket
pyaipersonality>=0.0.14
lollms
langchain

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

web/dist/index.html (vendored, 4 changed lines)

@ -6,8 +6,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>GPT4All - WEBUI</title>
<script type="module" crossorigin src="/assets/index-538eeb6b.js"></script>
<link rel="stylesheet" href="/assets/index-faf1ba4a.css">
<script type="module" crossorigin src="/assets/index-58ade542.js"></script>
<link rel="stylesheet" href="/assets/index-33184d9f.css">
</head>
<body>
<div id="app"></div>


@ -75,15 +75,15 @@
<h3 class="text-lg font-semibold cursor-pointer select-none mr-2">
Binding zoo</h3>
<div v-if="configFile.binding" class="mr-2">|</div>
<div v-if="configFile.binding_name" class="mr-2">|</div>
<div v-if="configFile.binding"
<div v-if="configFile.binding_name"
class=" text-base font-semibold cursor-pointer select-none items-center">
<div class="flex gap-1 items-center">
<img :src="imgBinding" class="w-8 h-8 rounded-full object-fill text-blue-700">
<h3 class="font-bold font-large text-lg">
{{ configFile.binding }}
{{ configFile.binding_name }}
</h3>
</div>
</div>
@ -97,7 +97,7 @@
<select id="binding" @change="update_binding($event.target.value)"
class="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500">
<option v-for="item in bindingsArr" :selected="item.folder === configFile.binding"
<option v-for="item in bindingsArr" :selected="item.folder === configFile.binding_name"
:value="item.folder">
{{ item.name }} by ({{ item.author }})
</option>
@ -113,7 +113,7 @@
<TransitionGroup name="list">
<BindingEntry ref="bindingZoo" v-for="(binding, index) in bindings"
:key="'index-' + index + '-' + binding.folder" :binding="binding"
:on-selected="onSelectedBinding" :selected="binding.folder === configFile.binding">
:on-selected="onSelectedBinding" :selected="binding.folder === configFile.binding_name">
</BindingEntry>
</TransitionGroup>
</div>
@ -150,15 +150,15 @@
No model selected!
</div>
<div v-if="configFile.model" class="mr-2">|</div>
<div v-if="configFile.model_name" class="mr-2">|</div>
<div v-if="configFile.model"
<div v-if="configFile.model_name"
class=" text-base font-semibold cursor-pointer select-none items-center">
<div class="flex gap-1 items-center">
<img :src="imgModel" class="w-8 h-8 rounded-lg object-fill">
<h3 class="font-bold font-large text-lg">
{{ configFile.model }}
{{ configFile.model_name }}
</h3>
</div>
@ -197,7 +197,7 @@
:path="model.path" :owner="model.owner" :owner_link="model.owner_link"
:license="model.license" :description="model.description"
:is-installed="model.isInstalled" :on-install="onInstall" :on-uninstall="onUninstall"
:on-selected="onSelected" :selected="model.title === configFile.model" :model="model"
:on-selected="onSelected" :selected="model.title === configFile.model_name" :model="model"
:model_type="model.model_type" />
</TransitionGroup>
</div>
@ -228,15 +228,15 @@
<i :data-feather="pzc_collapsed ? 'chevron-right' : 'chevron-down'" class="mr-2"></i>
<h3 class="text-lg font-semibold cursor-pointer select-none mr-2">
Personalities zoo</h3>
<div v-if="configFile.personality" class="mr-2">|</div>
<div v-if="configFile.personalities" class="mr-2">|</div>
<div v-if="configFile.personality"
<div v-if="configFile.personalities"
class=" text-base font-semibold cursor-pointer select-none items-center">
<div class="flex gap-1 items-center">
<img :src="imgPersonality" class="w-8 h-8 rounded-full object-fill text-red-700">
<h3 class="font-bold font-large text-lg">
{{ configFile.personality }}
{{ this.configFile.personality_folder }}
</h3>
</div>
@ -251,7 +251,7 @@
<select id="persLang" @change="update_setting('personality_language', $event.target.value, refresh)"
class="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500">
<option v-for="item in persLangArr" :selected="item === configFile.personality_language">{{ item
<option v-for="item in persLangArr" :selected="item === this.configFile.personality_language">{{ item
}}
</option>
@ -265,7 +265,7 @@
<select id="persCat" @change="update_setting('personality_category', $event.target.value, refresh)"
class="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500">
<option v-for="item in persCatgArr" :selected="item === configFile.personality_category">{{ item
<option v-for="item in persCatgArr" :selected="item === this.configFile.personality_category">{{ item
}}
</option>
@ -295,7 +295,7 @@
<TransitionGroup name="bounce">
<personality-entry ref="personalitiesZoo" v-for="(pers, index) in personalitiesFiltered"
:key="'index-' + index + '-' + pers.name" :personality="pers"
:selected="pers.name === configFile.personality && pers.category === configFile.personality_category && pers.language === configFile.personality_language"
:selected="pers.folder === this.configFile.personality_folder && pers.category === this.configFile.personality_category && pers.language === this.configFile.personality_language"
:on-selected="onPersonalitySelected" />
</TransitionGroup>
</div>
@ -659,15 +659,13 @@ export default {
this.$refs.toast.showToast("Loading... please wait", 4, false)
}
if (pers.personality) {
if (this.configFile.personality != pers.personality.name) {
if (this.configFile.personality_folder != pers.personality.folder) {
this.settingsChanged = true
const res = this.update_setting('personality', pers.personality.folder, () => {
const res = this.update_setting('personality_folder', pers.personality.folder, () => {
this.$refs.toast.showToast("Selected personality:\n" + pers.personality.name, 4, true)
this.configFile.personality = pers.personality.name
this.configFile.personality_category = pers.personality.category
this.configFile.personality_language = pers.personality.language
this.configFile.personalities[this.configFile.default_personality_id] = pers.personality.language + "/" + pers.personality.category + "/" + pers.personality.name
})
}
nextTick(() => {
@ -686,9 +684,9 @@ export default {
if (model_object) {
if (model_object.isInstalled) {
if (this.configFile.model != model_object.title) {
if (this.configFile.model_name != model_object.title) {
this.update_model(model_object.title)
this.configFile.model = model_object.title
this.configFile.model_name = model_object.title
this.$refs.toast.showToast("Selected model:\n" + model_object.title, 4, true)
this.settingsChanged = true
this.isModelSelected = true
@ -804,7 +802,7 @@ export default {
},
onSelectedBinding(binding_object) {
if (this.configFile.binding != binding_object.binding.folder) {
if (this.configFile.binding_name != binding_object.binding.folder) {
// disabled for now
// if (binding_object.binding.folder === 'backend_template' || binding_object.binding.folder === 'binding_template') {
@ -835,10 +833,17 @@ export default {
//this.api_get_req("list_languages").then(response => { this.langArr = response })
this.api_get_req("get_config").then(response => {
this.configFile = response
this.configFile.personality_language = this.configFile.personalities[this.configFile.default_personality_id].split('/')[0]
this.configFile.personality_category = this.configFile.personalities[this.configFile.default_personality_id].split('/')[1]
this.configFile.personality_folder = this.configFile.personalities[this.configFile.default_personality_id].split('/')[2]
console.log(`p lang = ${this.configFile.personality_language }`)
console.log(`p cat = ${this.configFile.personality_category }`)
console.log(`p name = ${this.configFile.personality_folder }`)
this.models.forEach(model => {
if (model.title == response["model"]) {
if (model.title == response["model_name"]) {
model.selected = true;
}
@ -882,7 +887,7 @@ export default {
// eslint-disable-next-line no-unused-vars
this.isLoading = true
this.update_setting('binding', value, (res) => {
this.update_setting('binding_name', value, (res) => {
this.refresh();
this.$refs.toast.showToast("Binding changed.", 4, true)
@ -894,7 +899,7 @@ export default {
})
// If binding changes then reset model
this.update_model(null)
this.configFile.model = null
this.configFile.model_name = null
this.api_get_req("disk_usage").then(response => {
this.diskUsage = response
@ -906,14 +911,14 @@ export default {
if (!value) this.isModelSelected = false
// eslint-disable-next-line no-unused-vars
this.isLoading = true
this.update_setting('model', value, (res) => {
this.update_setting('model_name', value, (res) => {
//this.fetchModels();
this.isLoading = false
})
},
applyConfiguration() {
// if (!this.configFile.model) {
// if (!this.configFile.model_name) {
// this.$refs.toast.showToast("Configuration changed failed.\nPlease select model first", 4, false)
// nextTick(() => {
@ -1011,6 +1016,8 @@ export default {
this.isLoading = true
this.personalities = []
const dictionary = await this.api_get_req("get_all_personalities")
console.log("all_personalities")
console.log(dictionary)
const langkeys = Object.keys(dictionary); // returns languages folder names
for (let i = 0; i < langkeys.length; i++) {
const langkey = langkeys[i];
@ -1037,7 +1044,11 @@ export default {
}
}
console.log("Personalities")
console.log(this.personalities)
this.personalitiesFiltered = this.personalities.filter((item) => item.category === this.configFile.personality_category && item.language === this.configFile.personality_language)
console.log("Personalities filtered")
console.log(this.personalitiesFiltered)
this.isLoading = false
},
@ -1053,7 +1064,7 @@ export default {
})
this.configFile = await this.api_get_req("get_config")
if (this.configFile.model) {
if (this.configFile.model_name) {
this.isModelSelected = true
}
this.fetchModels();
@ -1089,19 +1100,21 @@ export default {
if (!this.isMounted) {
return
}
return this.$refs.bindingZoo[this.$refs.bindingZoo.findIndex(item => item.binding.folder == this.configFile.binding)].$refs.imgElement.src
return this.$refs.bindingZoo[this.$refs.bindingZoo.findIndex(item => item.binding.folder == this.configFile.binding_name)].$refs.imgElement.src
},
imgModel() {
if (!this.isMounted) {
return
}
return this.$refs.modelZoo[this.$refs.modelZoo.findIndex(item => item.title == this.configFile.model)].$refs.imgElement.src
console.log("Config file")
console.log(this.configFile)
return this.$refs.modelZoo[this.$refs.modelZoo.findIndex(item => item.title == this.configFile.model_name)].$refs.imgElement.src
},
imgPersonality() {
if (!this.isMounted) {
return
}
return this.$refs.personalitiesZoo[this.$refs.personalitiesZoo.findIndex(item => item.personality.name == this.configFile.personality)].$refs.imgElement.src
return this.$refs.personalitiesZoo[this.$refs.personalitiesZoo.findIndex(item => item.personality.folder == this.configFile.personality_folder)].$refs.imgElement.src
},
},