Changed backends to bindings

Saifeddine ALOUI 2023-05-25 23:24:14 +02:00
parent 4c949bc02e
commit a53faf8ba5
67 changed files with 287 additions and 327 deletions

View File

@ -8,7 +8,7 @@ RUN python3 -m pip install --no-cache-dir -r requirements.txt --upgrade pip
COPY ./app.py /srv/app.py
COPY ./api /srv/api
COPY ./backends /srv/backends
COPY ./bindings /srv/bindings
COPY ./static /srv/static
COPY ./templates /srv/templates

View File

@ -101,22 +101,22 @@ class ModelProcess:
def reset_config_result(self):
self._set_config_result = {
'status': 'succeeded',
'backend_status':'ok',
'binding_status':'ok',
'model_status':'ok',
'personality_status':'ok',
'errors':[]
}
def load_backend(self, backend_name:str, install=False):
def load_binding(self, binding_name:str, install=False):
if install:
print(f"Loading backend {backend_name} install ON")
print(f"Loading binding {binding_name} install ON")
else:
print(f"Loading backend : {backend_name} install is off")
backend_path = Path("backends")/backend_name
print(f"Loading binding : {binding_name} install is off")
binding_path = Path("bindings")/binding_name
if install:
# first find out if there is an install.py script
install_file_name="install.py"
install_script_path = backend_path / install_file_name
install_script_path = binding_path / install_file_name
if install_script_path.exists():
module_name = install_file_name[:-3] # Remove the ".py" extension
module_spec = importlib.util.spec_from_file_location(module_name, str(install_script_path))
@ -126,16 +126,16 @@ class ModelProcess:
module.Install(self)
# define the full absolute path to the module
absolute_path = backend_path.resolve()
absolute_path = binding_path.resolve()
# infer the module name from the file path
module_name = backend_path.stem
module_name = binding_path.stem
# use importlib to load the module from the file path
loader = importlib.machinery.SourceFileLoader(module_name, str(absolute_path/"__init__.py"))
backend_module = loader.load_module()
backend_class = getattr(backend_module, backend_module.backend_name)
return backend_class
binding_module = loader.load_module()
binding_class = getattr(binding_module, binding_module.binding_name)
return binding_class
def start(self):
if self.process is None:
@ -148,8 +148,8 @@ class ModelProcess:
self.process.join()
self.process = None
def set_backend(self, backend_path):
self.backend = backend_path
def set_binding(self, binding_path):
self.binding = binding_path
def set_model(self, model_path):
self.model = model_path
@ -174,27 +174,27 @@ class ModelProcess:
def clear_queue(self):
self.clear_queue_queue.put(('clear_queue',))
def rebuild_backend(self, config):
def rebuild_binding(self, config):
try:
print(" ******************* Building Backend from main Process *************************")
backend = self.load_backend(config["backend"], install=True)
print("Backend loaded successfully")
print(" ******************* Building Binding from main Process *************************")
binding = self.load_binding(config["binding"], install=True)
print("Binding loaded successfully")
except Exception as ex:
print("Couldn't build backend.")
print("Couldn't build binding.")
print(ex)
backend = None
return backend
binding = None
return binding
def _rebuild_model(self):
try:
self.reset_config_result()
print(" ******************* Building Backend from generation Process *************************")
self.backend = self.load_backend(self.config["backend"], install=True)
print("Backend loaded successfully")
print(" ******************* Building Binding from generation Process *************************")
self.binding = self.load_binding(self.config["binding"], install=True)
print("Binding loaded successfully")
try:
model_file = Path("models")/self.config["backend"]/self.config["model"]
model_file = Path("models")/self.config["binding"]/self.config["model"]
print(f"Loading model : {model_file}")
self.model = self.backend(self.config)
self.model = self.binding(self.config)
self.model_ready.value = 1
print("Model created successfully\n")
except Exception as ex:
@ -203,17 +203,17 @@ class ModelProcess:
print(ex)
self.model = None
self._set_config_result['status'] ='failed'
self._set_config_result['backend_status'] ='failed'
self._set_config_result['errors'].append(f"couldn't build backend:{ex}")
self._set_config_result['binding_status'] ='failed'
self._set_config_result['errors'].append(f"couldn't build binding:{ex}")
except Exception as ex:
traceback.print_exc()
print("Couldn't build backend")
print("Couldn't build binding")
print(ex)
self.backend = None
self.binding = None
self.model = None
self._set_config_result['status'] ='failed'
self._set_config_result['backend_status'] ='failed'
self._set_config_result['errors'].append(f"couldn't build backend:{ex}")
self._set_config_result['binding_status'] ='failed'
self._set_config_result['errors'].append(f"couldn't build binding:{ex}")
def rebuild_personality(self):
try:
@ -244,8 +244,8 @@ class ModelProcess:
print(ex)
self.personality = AIPersonality()
self._set_config_result['status'] ='failed'
self._set_config_result['backend_status'] ='failed'
self._set_config_result['errors'].append(f"couldn't build backend:{ex}")
self._set_config_result['binding_status'] ='failed'
self._set_config_result['errors'].append(f"couldn't build binding:{ex}")
def step_callback(self, text, message_type):
self.generation_queue.put((text,self.id, message_type))
@ -331,7 +331,7 @@ class ModelProcess:
)
else:
print("No model is installed or selected. Please make sure to install a model and select it inside your configuration before attempting to communicate with the model.")
print("To do this: Install the model to your models/<backend name> folder.")
print("To do this: Install the model to your models/<binding name> folder.")
print("Then set your model information in your local configuration file that you can find in configs/local_default.yaml")
print("You can also use the ui to set your model in the settings page.")
output = ""
@ -385,8 +385,8 @@ class ModelProcess:
bk_cfg = self.config
self.config = config
print("Changing configuration")
# verify that the backend is the same
if self.config["backend"]!=bk_cfg["backend"] or self.config["model"]!=bk_cfg["model"]:
# verify that the binding is the same
if self.config["binding"]!=bk_cfg["binding"] or self.config["model"]!=bk_cfg["model"]:
self._rebuild_model()
# verify that the personality is the same
@ -401,7 +401,7 @@ class GPT4AllAPI():
self.process = ModelProcess(config)
self.config = config
self.backend = self.process.rebuild_backend(self.config)
self.binding = self.process.rebuild_binding(self.config)
self.personality = self.process.rebuild_personality()
if config["debug"]:
print(f"{self.personality}")
@ -442,7 +442,7 @@ class GPT4AllAPI():
print("Install model triggered")
model_path = data["path"]
progress = 0
installation_dir = Path(f'./models/{self.config["backend"]}/')
installation_dir = Path(f'./models/{self.config["binding"]}/')
filename = Path(model_path).name
installation_path = installation_dir / filename
print("Model install requested")
@ -466,7 +466,7 @@ class GPT4AllAPI():
@socketio.on('uninstall_model')
def uninstall_model(data):
model_path = data['path']
installation_dir = Path(f'./models/{self.config["backend"]}/')
installation_dir = Path(f'./models/{self.config["binding"]}/')
filename = Path(model_path).name
installation_path = installation_dir / filename
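For context, the dynamic import that `load_binding` performs (top of this file's diff) boils down to the following pattern; a minimal standalone sketch, with an illustrative binding folder name:

```python
# Minimal sketch of the load_binding() import pattern (illustrative folder name).
import importlib.machinery
from pathlib import Path

binding_path = Path("bindings") / "llama_cpp_official"

# Load the binding package's __init__.py as a module...
loader = importlib.machinery.SourceFileLoader(
    binding_path.stem, str(binding_path.resolve() / "__init__.py")
)
binding_module = loader.load_module()

# ...then resolve the class through the module-level binding_name variable
# that every binding in this commit defines.
binding_class = getattr(binding_module, binding_module.binding_name)
```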

View File

@ -1,11 +1,11 @@
######
# Project : GPT4ALL-UI
# File : backend.py
# File : binding.py
# Author : ParisNeo with the help of the community
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This is an interface class for GPT4All-ui bindings.
######
from pathlib import Path
from typing import Callable
@ -19,9 +19,9 @@ __copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
class LLMBackend:
class LLMBinding:
file_extension='*.bin'
backend_path = Path(__file__).parent
binding_path = Path(__file__).parent
def __init__(self, config:dict, inline:bool) -> None:
self.config = config
self.inline = inline
@ -69,16 +69,16 @@ class LLMBackend:
@staticmethod
def list_models(config:dict):
"""Lists the models for this backend
"""Lists the models for this binding
"""
models_dir = Path('./models')/config["backend"] # replace with the actual path to the models folder
return [f.name for f in models_dir.glob(LLMBackend.file_extension)]
models_dir = Path('./models')/config["binding"] # replace with the actual path to the models folder
return [f.name for f in models_dir.glob(LLMBinding.file_extension)]
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
backend_path = Path(__file__).parent
file_path = backend_path/"models.yaml"
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)
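To make the interface concrete: a binding module only has to define a module-level `binding_name` plus a class deriving from `LLMBinding`; a minimal hedged sketch (the class name is illustrative, mirroring the template bindings later in this diff):

```python
# Hedged sketch of a concrete binding; the class name is illustrative.
from api.binding import LLMBinding

binding_name = "MyBinding"  # load_binding() resolves the class through this

class MyBinding(LLMBinding):
    file_extension = '*.bin'  # consumed by the default list_models()

    def __init__(self, config: dict) -> None:
        # inline=False: the model is expected to run in the generation process
        super().__init__(config, False)
        # a real binding would load models/<binding>/<model> here
```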

app.py (60 changed lines)
View File

@ -83,7 +83,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
self.add_endpoint(
"/list_backends", "list_backends", self.list_backends, methods=["GET"]
"/list_bindings", "list_bindings", self.list_bindings, methods=["GET"]
)
self.add_endpoint(
"/list_models", "list_models", self.list_models, methods=["GET"]
@ -149,7 +149,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
)
self.add_endpoint(
"/set_backend", "set_backend", self.set_backend, methods=["POST"]
"/set_binding", "set_binding", self.set_binding, methods=["POST"]
)
self.add_endpoint(
@ -432,15 +432,15 @@ class Gpt4AllWebUI(GPT4AllAPI):
self.config["model"]=data['setting_value']
print("update_settings : New model selected")
elif setting_name== "backend":
if self.config['backend']!= data['setting_value']:
print(f"New backend selected : {data['setting_value']}")
self.config["backend"]=data['setting_value']
elif setting_name== "binding":
if self.config['binding']!= data['setting_value']:
print(f"New binding selected : {data['setting_value']}")
self.config["binding"]=data['setting_value']
try:
self.backend = self.process.load_backend(self.config["backend"], install=True)
self.binding = self.process.load_binding(self.config["binding"], install=True)
except Exception as ex:
print("Couldn't build backend")
print("Couldn't build binding")
return jsonify({'setting_name': data['setting_name'], "status":False, 'error':str(ex)})
else:
if self.config["debug"]:
@ -466,15 +466,15 @@ class Gpt4AllWebUI(GPT4AllAPI):
print(result)
return jsonify(result)
def list_backends(self):
backends_dir = Path('./backends') # replace with the actual path to the backends folder
backends = [f.stem for f in backends_dir.iterdir() if f.is_dir() and f.stem!="__pycache__"]
return jsonify(backends)
def list_bindings(self):
bindings_dir = Path('./bindings') # replace with the actual path to the bindings folder
bindings = [f.stem for f in bindings_dir.iterdir() if f.is_dir() and f.stem!="__pycache__"]
return jsonify(bindings)
def list_models(self):
if self.backend is not None:
models = self.backend.list_models(self.config)
if self.binding is not None:
models = self.binding.list_models(self.config)
return jsonify(models)
else:
return jsonify([])
@ -683,18 +683,18 @@ class Gpt4AllWebUI(GPT4AllAPI):
# Return a success response
return json.dumps({"id": self.current_discussion.discussion_id, "time": timestamp, "welcome_message":self.personality.welcome_message, "sender":self.personality.name})
def set_backend(self):
def set_binding(self):
data = request.get_json()
backend = str(data["backend"])
if self.config['backend']!= backend:
print("New backend selected")
binding = str(data["binding"])
if self.config['binding']!= binding:
print("New binding selected")
self.config['backend'] = backend
self.config['binding'] = binding
try:
backend_ =self.process.load_backend(config["backend"],True)
models = backend_.list_models(self.config)
binding_ =self.process.load_binding(config["binding"],True)
models = binding_.list_models(self.config)
if len(models)>0:
self.backend = backend_
self.binding = binding_
self.config['model'] = models[0]
# Build chatbot
return jsonify(self.process.set_config(self.config))
@ -718,16 +718,16 @@ class Gpt4AllWebUI(GPT4AllAPI):
def update_model_params(self):
data = request.get_json()
backend = str(data["backend"])
binding = str(data["binding"])
model = str(data["model"])
personality_language = str(data["personality_language"])
personality_category = str(data["personality_category"])
personality = str(data["personality"])
if self.config['backend']!=backend or self.config['model'] != model:
if self.config['binding']!=binding or self.config['model'] != model:
print("update_model_params: New model selected")
self.config['backend'] = backend
self.config['binding'] = binding
self.config['model'] = model
self.config['personality_language'] = personality_language
@ -753,11 +753,11 @@ class Gpt4AllWebUI(GPT4AllAPI):
# Fixed missing argument
self.backend = self.process.rebuild_backend(self.config)
self.binding = self.process.rebuild_binding(self.config)
print("==============================================")
print("Parameters changed to:")
print(f"\tBackend:{self.config['backend']}")
print(f"\tBinding:{self.config['binding']}")
print(f"\tModel:{self.config['model']}")
print(f"\tPersonality language:{self.config['personality_language']}")
print(f"\tPersonality category:{self.config['personality_category']}")
@ -782,9 +782,9 @@ class Gpt4AllWebUI(GPT4AllAPI):
Returns:
_type_: _description_
"""
if self.backend is None:
if self.binding is None:
return jsonify([])
model_list = self.backend.get_available_models()
model_list = self.binding.get_available_models()
models = []
for model in model_list:
@ -801,7 +801,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
path = f'{server}{filename}'
else:
path = f'{server}/{filename}'
local_path = Path(f'./models/{self.config["backend"]}/{filename}')
local_path = Path(f'./models/{self.config["binding"]}/{filename}')
is_installed = local_path.exists()
models.append({
'title': filename,

View File

@ -1,20 +1,20 @@
######
# Project : GPT4ALL-UI
# File : backend.py
# File : binding.py
# Author : ParisNeo with the help of the community
# Underlying backend : Abdeladim's pygptj backend
# Underlying binding : Abdeladim's pygptj binding
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This is an interface class for GPT4All-ui bindings.
# This backend is a wrapper to marella's backend
# This binding is a wrapper to marella's binding
# Follow him on his github project : https://github.com/marella/ctransformers
######
from pathlib import Path
from typing import Callable
from api.backend import LLMBackend
from api.binding import LLMBinding
import yaml
from api.config import load_config
import re
@ -24,15 +24,15 @@ __github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
backend_name = "CustomBackend"
binding_name = "CustomBinding"
class CustomBackend(LLMBackend):
# Define what is the extension of the model files supported by your backend
class CustomBinding(LLMBinding):
# Define what is the extension of the model files supported by your binding
# Only applicable for local models; for remote models like gpt4 and others, you can keep it empty
# and reimplement your own list_models method
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a custom backend
"""Builds a custom binding
Args:
config (dict): The configuration file
@ -92,8 +92,8 @@ class CustomBackend(LLMBackend):
tokens = self.model.tokenize(prompt)
count = 0
generated_text = """
This is an empty backend that shows how you can build your own backend.
Find it in backends
This is an empty binding that shows how you can build your own binding.
Find it in bindings
"""
for tok in re.split(r' |\n', generated_text):
if count >= n_predict or self.model.is_eos_token(tok):
@ -112,17 +112,17 @@ Find it in backends
# Decomment if you want to build a custom model listing
#@staticmethod
#def list_models(config:dict):
# """Lists the models for this backend
# """Lists the models for this binding
# """
# models_dir = Path('./models')/config["backend"] # replace with the actual path to the models folder
# return [f.name for f in models_dir.glob(LLMBackend.file_extension)]
# models_dir = Path('./models')/config["binding"] # replace with the actual path to the models folder
# return [f.name for f in models_dir.glob(LLMBinding.file_extension)]
#
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
backend_path = Path(__file__).parent
file_path = backend_path/"models.yaml"
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)

View File

@ -11,8 +11,8 @@ class Install:
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- Template backend -------------------------------")
print("This is the first time you are using this backend.")
print("-------------- Template binding -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
# Example of installing pytorch
"""

View File

@ -1,20 +1,20 @@
######
# Project : GPT4ALL-UI
# File : backend.py
# File : binding.py
# Author : ParisNeo with the help of the community
# Underlying backend : Marella's ctransformers backend
# Underlying binding : Marella's ctransformers binding
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This is an interface class for GPT4All-ui bindings.
# This backend is a wrapper to marella's backend
# This binding is a wrapper to marella's binding
# Follow him on his github project : https://github.com/marella/ctransformers
######
from pathlib import Path
from typing import Callable
from api.backend import LLMBackend
from api.binding import LLMBinding
import yaml
from ctransformers import AutoModelForCausalLM
@ -23,12 +23,12 @@ __github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
backend_name = "CTRansformers"
binding_name = "CTRansformers"
class CTRansformers(LLMBackend):
class CTRansformers(LLMBinding):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a CTRansformers backend
"""Builds a CTRansformers binding
Args:
config (dict): The configuration file
@ -49,7 +49,7 @@ class CTRansformers(LLMBackend):
elif 'mpt' in self.config['model']:
model_type='mpt'
else:
print("The model you are using is not supported by this backend")
print("The model you are using is not supported by this binding")
return
@ -135,8 +135,8 @@ class CTRansformers(LLMBackend):
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
backend_path = Path(__file__).parent
file_path = backend_path/"models.yaml"
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)

View File

@ -10,8 +10,8 @@ class Install:
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- cTransformers backend -------------------------------")
print("This is the first time you are using this backend.")
print("-------------- cTransformers binding -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
"""
try:

View File

@ -1,21 +1,21 @@
######
# Project : GPT4ALL-UI
# File : backend.py
# File : binding.py
# Author : ParisNeo with the help of the community
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This is an interface class for GPT4All-ui bindings.
# This backend is a wrapper to gpt4all's official backend
# This binding is a wrapper to gpt4all's official binding
# Follow him on his github project : https://github.com/nomic-ai/gpt4all
######
from pathlib import Path
from typing import Callable
from gpt4all import GPT4All
from api.backend import LLMBackend
from api.binding import LLMBinding
import yaml
__author__ = "parisneo"
@ -23,13 +23,13 @@ __github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
backend_name = "GPT4ALL"
binding_name = "GPT4ALL"
class GPT4ALL(LLMBackend):
class GPT4ALL(LLMBinding):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a GPT4ALL backend
"""Builds a GPT4ALL binding
Args:
config (dict): The configuration file
@ -101,8 +101,8 @@ class GPT4ALL(LLMBackend):
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
backend_path = Path(__file__).parent
file_path = backend_path/"models.yaml"
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)

View File

@ -10,8 +10,8 @@ class Install:
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- GPT4All backend by nomic-ai -------------------------------")
print("This is the first time you are using this backend.")
print("-------------- GPT4All binding by nomic-ai -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
"""
try:

View File

@ -1,33 +1,33 @@
######
# Project : GPT4ALL-UI
# File : backend.py
# File : binding.py
# Author : ParisNeo with the help of the community
# Underlying backend : Abdeladim's pygptj backend
# Underlying binding : Abdeladim's pygptj binding
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This is an interface class for GPT4All-ui bindings.
# This backend is a wrapper to abdeladim's backend
# This binding is a wrapper to abdeladim's binding
# Follow him on his github project : https://github.com/abdeladim-s/pygptj
######
from pathlib import Path
from typing import Callable
from pygptj.model import Model
from api.backend import LLMBackend
from api.binding import LLMBinding
__author__ = "parisneo"
__github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
backend_name = "GptJ"
binding_name = "GptJ"
class GptJ(LLMBackend):
class GptJ(LLMBinding):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a GptJ backend
"""Builds a GptJ binding
Args:
config (dict): The configuration file

View File

@ -10,8 +10,8 @@ class Install:
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- GPTj backend by abdeladim -------------------------------")
print("This is the first time you are using this backend.")
print("-------------- GPTj binding by abdeladim -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
try:
print("Checking pytorch")

View File

@ -1,21 +1,21 @@
######
# Project : GPT4ALL-UI
# File : backend.py
# File : binding.py
# Author : ParisNeo with the help of the community
# Underlying backend : Marella's gpt4all-j backend
# Underlying binding : Marella's gpt4all-j binding
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This is an interface class for GPT4All-ui bindings.
# This backend is a wrapper to marella's backend
# This binding is a wrapper to marella's binding
# Follow him on his github project : https://github.com/marella/gpt4all-j
######
from pathlib import Path
from typing import Callable
from gpt4allj import Model
from api.backend import LLMBackend
from api.binding import LLMBinding
import yaml
__author__ = "parisneo"
@ -23,12 +23,12 @@ __github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
backend_name = "GPTJ"
binding_name = "GPTJ"
class GPTJ(LLMBackend):
class GPTJ(LLMBinding):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a GPTJ backend
"""Builds a GPTJ binding
Args:
config (dict): The configuration file
@ -101,8 +101,8 @@ class GPTJ(LLMBackend):
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
backend_path = Path(__file__).parent
file_path = backend_path/"models.yaml"
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)

View File

@ -10,8 +10,8 @@ class Install:
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- GPTj backend by marella -------------------------------")
print("This is the first time you are using this backend.")
print("-------------- GPTj binding by marella -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
try:
print("Checking pytorch")

View File

@ -1,31 +1,31 @@
######
# Project : GPT4ALL-UI
# File : backend.py
# File : binding.py
# Author : ParisNeo with the help of the community
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This is an interface class for GPT4All-ui bindings.
######
from pathlib import Path
from typing import Callable
from transformers import AutoTokenizer, TextGenerationPipeline
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
from api.backend import LLMBackend
from api.binding import LLMBinding
import torch
import yaml
__author__ = "parisneo"
__github__ = "https://github.com/ParisNeo/GPTQ_backend"
__github__ = "https://github.com/ParisNeo/GPTQ_binding"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
backend_name = "GPTQ"
binding_name = "GPTQ"
class GPTQ(LLMBackend):
class GPTQ(LLMBinding):
file_extension='*'
def __init__(self, config:dict) -> None:
"""Builds a GPTQ backend
"""Builds a GPTQ binding
Args:
config (dict): The configuration file
@ -99,7 +99,7 @@ class GPTQ(LLMBackend):
@staticmethod
def list_models(config:dict):
"""Lists the models for this backend
"""Lists the models for this binding
"""
return [
@ -111,8 +111,8 @@ class GPTQ(LLMBackend):
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
backend_path = Path(__file__).parent
file_path = backend_path/"models.yaml"
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)

View File

@ -11,8 +11,8 @@ class Install:
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- GPTQ backend -------------------------------")
print("This is the first time you are using this backend.")
print("-------------- GPTQ binding -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
# Example of installing pytorch
"""

View File

@ -1,20 +1,20 @@
######
# Project : GPT4ALL-UI
# File : backend.py
# File : binding.py
# Author : ParisNeo with the help of the community
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This is an interface class for GPT4All-ui bindings.
# This backend is a wrapper to the official llamacpp python bindings
# This binding is a wrapper to the official llamacpp python bindings
# Follow him on his github project : https://github.com/abetlen/llama-cpp-python
######
from pathlib import Path
from typing import Callable
from llama_cpp import Llama
from api.backend import LLMBackend
from api.binding import LLMBinding
import yaml
import random
@ -23,12 +23,12 @@ __github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
backend_name = "LLAMACPP"
binding_name = "LLAMACPP"
class LLAMACPP(LLMBackend):
class LLAMACPP(LLMBinding):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a LLAMACPP backend
"""Builds a LLAMACPP binding
Args:
config (dict): The configuration file
@ -106,8 +106,8 @@ class LLAMACPP(LLMBackend):
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
backend_path = Path(__file__).parent
file_path = backend_path/"models.yaml"
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)

View File

@ -10,8 +10,8 @@ class Install:
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- llama_cpp_official backend -------------------------------")
print("This is the first time you are using this backend.")
print("-------------- llama_cpp_official binding -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
try:
print("Checking pytorch")

View File

@ -20,6 +20,7 @@
- bestLlama: 'true'
description: 'Project-Baize Quantized on 4 bits '
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: baize-v2-13b.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
@ -28,6 +29,7 @@
sha256: 5994f92f3cc8d3fe2d09a44c174ed8c0f4f32819597feaafc9d6bd06208d3df6
- bestLlama: 'true'
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
description: 'MedAlpaca 13B Quantized on 4 bits: model specifically fine-tuned for medical domain tasks'
filename: medalpaca-13B.ggmlv3.q4_0.bin
license: Non commercial
@ -38,6 +40,7 @@
- bestLlama: 'true'
description: 'MedAlpaca 13B Quantized on 5 bits: model specifically fine-tuned for medical domain tasks'
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: medalpaca-13B.ggmlv3.q5_1.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
@ -48,6 +51,7 @@
- bestLlama: 'true'
description: 'Wizard-Vicuna-13B-Uncensored-GGML Quantized on 4 bits'
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: Wizard-Vicuna-13B-Uncensored.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke
@ -58,7 +62,7 @@
- bestLlama: 'true'
description: Legacy version of Vicuna 7B v 1.1 Quantized on 4 bits
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
icon : https://huggingface.co/avatars/40cbc1742585a88eeecf2915c7e4557c.svg
filename: legacy-ggml-vicuna-7B-1.1-q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/CRD716
@ -78,6 +82,7 @@
- description: Koala 7B model produced at Berkeley
icon : https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg?w=200&h=200&f=face
filename: koala-7B.ggmlv3.q4_0.bin
license: Non commercial
owner_link: https://huggingface.co/TheBloke

View File

@ -1,20 +1,20 @@
######
# Project : GPT4ALL-UI
# File : backend.py
# File : binding.py
# Author : ParisNeo with the help of the community
# Underlying backend : Abdeladim's pygptj backend
# Underlying binding : Abdeladim's pygptj binding
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This is an interface class for GPT4All-ui bindings.
# This backend is a wrapper to marella's backend
# This binding is a wrapper to marella's binding
# Follow him on his github project : https://github.com/marella/ctransformers
######
from pathlib import Path
from typing import Callable
from api.backend import LLMBackend
from api.binding import LLMBinding
from api.config import load_config
import yaml
import re
@ -24,15 +24,15 @@ __github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
backend_name = "CustomBackend"
binding_name = "CustomBinding"
class CustomBackend(LLMBackend):
# Define what is the extension of the model files supported by your backend
class CustomBinding(LLMBinding):
# Define what is the extension of the model files supported by your binding
# Only applicable for local models; for remote models like gpt4 and others, you can keep it empty
# and reimplement your own list_models method
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a custom backend
"""Builds a custom binding
Args:
config (dict): The configuration file
@ -91,8 +91,8 @@ class CustomBackend(LLMBackend):
tokens = self.model.tokenize(prompt)
count = 0
generated_text = """
This is an empty backend that shows how you can build your own backend.
Find it in backends
This is an empty binding that shows how you can build your own binding.
Find it in bindings
"""
for tok in re.split(r' |\n', generated_text):
if count >= n_predict or self.model.is_eos_token(tok):
@ -110,15 +110,15 @@ Find it in backends
@staticmethod
def list_models(config:dict):
"""Lists the models for this backend
"""Lists the models for this binding
"""
return ["ChatGpt by Open AI"]
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
backend_path = Path(__file__).parent
file_path = backend_path/"models.yaml"
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)

View File

@ -11,8 +11,8 @@ class Install:
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- Template backend -------------------------------")
print("This is the first time you are using this backend.")
print("-------------- Template binding -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
# Step 2: Install dependencies using pip from requirements.txt
requirements_file = current_dir / "requirements.txt"

View File

@ -1,20 +1,20 @@
######
# Project : GPT4ALL-UI
# File : backend.py
# File : binding.py
# Author : ParisNeo with the help of the community
# Supported by Nomic-AI
# license : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This is an interface class for GPT4All-ui bindings.
# This backend is a wrapper to abdeladim's backend
# This binding is a wrapper to abdeladim's binding
# Follow him on his github project : https://github.com/abdeladim-s/pyllamacpp
######
from pathlib import Path
from typing import Callable
from pyllamacpp.model import Model
from api.backend import LLMBackend
from api.binding import LLMBinding
import yaml
__author__ = "parisneo"
@ -22,12 +22,12 @@ __github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
backend_name = "PyLLAMACPP"
binding_name = "PyLLAMACPP"
class PyLLAMACPP(LLMBackend):
class PyLLAMACPP(LLMBinding):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a LLAMACPP backend
"""Builds a LLAMACPP binding
Args:
config (dict): The configuration file
@ -101,8 +101,8 @@ class PyLLAMACPP(LLMBackend):
@staticmethod
def get_available_models():
# Create the file path relative to the child class's directory
backend_path = Path(__file__).parent
file_path = backend_path/"models.yaml"
binding_path = Path(__file__).parent
file_path = binding_path/"models.yaml"
with open(file_path, 'r') as file:
yaml_data = yaml.safe_load(file)

View File

@ -10,8 +10,8 @@ class Install:
install_file = current_dir / ".installed"
if not install_file.exists():
print("-------------- pyllamacpp backend by abdeladim -------------------------------")
print("This is the first time you are using this backend.")
print("-------------- pyllamacpp binding by abdeladim -------------------------------")
print("This is the first time you are using this binding.")
print("Installing ...")
try:
print("Checking pytorch")

View File

@ -8,8 +8,8 @@ debug: false
n_threads: 8
host: localhost
language: en-US
# Supported backends are llamacpp and gpt-j
backend: gpt_4all
# Supported bindings are llamacpp and gpt-j
binding: gpt_4all
model: null
n_predict: 1024
nb_messages_to_remember: 5
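Since every binding in this commit reads YAML with `yaml.safe_load`, consuming this configuration follows the same pattern; a short hedged sketch (the path matches the one mentioned in the app's messages):

```python
# Hedged sketch: reading the local configuration shown above.
import yaml
from pathlib import Path

with open(Path("configs") / "local_default.yaml", "r") as f:
    config = yaml.safe_load(f)

print(config["binding"])  # e.g. gpt_4all
print(config["model"])    # null in YAML becomes None until a model is chosen
```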

View File

@ -18,7 +18,7 @@ npm run dev
> Note
> To run the development environment you need to create a copy of the `.env` file and name it either `.env.development` or, if that doesn't work, `.env.dev`. Set `VITE_GPT4ALL_API_BASEURL = /api/` in the `.env.development`.
> Run your gpt backend by launching `webui.bat` (Windows) or `webui.sh` (bash).
> Run your gpt binding by launching `webui.bat` (Windows) or `webui.sh` (bash).
## Building frontend - UI
@ -41,13 +41,13 @@ Here we keep track of things to implement and stuff we need to do.
- Add DB switcher (I'm thinking in the settings view)
- Make the UI work well on mobile
- Need to fix colors for `<input />` fields
- Create status bar for backend to display if something is generating on the backend
- Create status bar for binding to display if something is generating on the binding
- Add ability for users to style the whole UI, either changing Hue or changing every color manually.
- Create a panel in the Settings tab to create new personalities
- Need to investigate performance of websocket when message is being streamed back to the UI
- On first launch of the UI force users to create "User" personality, to be used as "User" for any or all input messages.
- Add drag n drop files into messages, images gets parsed as images, ability to load images from messages from DB.
- Send files to backend - images, other files for parsing data.
- Send files to binding - images, other files for parsing data.
- Ability to reorder Discussions, add tags, categories
- Export whole DB
- Reset whole DB
@ -70,14 +70,14 @@ Here we keep track of things to implement and stuff we need to do.
- Fix up the discussion array to filter out the messages by type not by count. (conditionner and )[DONE]
- Add title of current discussion to page [DONE]
- Populate settings with settings controls [DONE]
- Connect Settings to backend, ability to save changes [DONE]
- Connect Settings to binding, ability to save changes [DONE]
- Scroll to bottom [SCROLLBAR]
- Scroll to top [SCROLLBAR]
- Create stop generating button [DONE]
- Fix the generated message formatting - add line breaks, also for user input messages. [DONE]
- Maybe try to set the chatbox to float to the bottom (always on the bottom of the screen) [DONE]
- Need to fix: when the user inputs a message it shows up in the discussion array, then a new message is added for the bot that is typing. [DONE]
- Connect delete / export discussions to backend functions.[DONE]
- Connect delete / export discussions to binding functions.[DONE]
- Need to fix when deleting multiple discussions to not lose the loading animation for each discussion when the list gets updated [DONE]
- Need to add loading feedback for when a new discussion is being created [DONE]
- Add ability to select multiple discussions to delete [DONE]

View File

@ -5,7 +5,7 @@ This Flask server provides various endpoints to manage and interact with the cha
## Endpoints:
- "/list_backends": GET request endpoint to list all the available backends.
- "/list_bindings": GET request endpoint to list all the available bindings.
```
[
"llama_cpp"
@ -159,14 +159,14 @@ This Flask server provides various endpoints to manage and interact with the cha
- "/message_rank_up": GET request endpoint to rank up a message.
- "/message_rank_down": GET request endpoint to rank down a message.
- "/delete_message": GET request endpoint to delete a message.
- "/set_backend": POST request endpoint to set the backend.
- "/set_binding": POST request endpoint to set the binding.
- "/set_model": POST request endpoint to set the model.
- "/update_model_params": POST request endpoint to update the model parameters.
- "/get_config": GET request endpoint to get the chatbot's configuration.
```
{
"auto_read": false,
"backend": "llama_cpp",
"binding": "llama_cpp",
"config": "local_default",
"ctx_size": 2048,
"db_path": "databases/database.db",
@ -243,7 +243,7 @@ This Flask server provides various endpoints to manage and interact with the cha
## TODO Endpoints:
Here we list needed endpoints on the backend to make the UI work as expected.
Here we list needed endpoints on the binding to make the UI work as expected.
# Socketio endpoints
@ -274,7 +274,7 @@ The available settings are:
- `personality_category`: A string representing the category of personality traits to use for generating responses.
- `personality`: A string representing the name of a specific personality traits to use for generating responses.
- `model`: A string representing the model to use for generating responses.
- `backend`: A string representing the backend to use for generating responses.
- `binding`: A string representing the binding to use for generating responses.
The save_settings function is used to save the updated settings to a configuration file.
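For illustration, the `/set_binding` endpoint above expects a JSON body whose key is `binding`, matching what `app.py` reads from the request (the value shown is the sample binding from `/list_bindings`):

```
{
  "binding": "llama_cpp"
}
```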

View File

@ -27,7 +27,7 @@ version: 1.0
# UI metadata
has_ui: true
# backend metadata
# binding metadata
can_trigger_the_model :
# Conditionning override

View File

@ -3,55 +3,10 @@
Hi there. Today I'm finally presenting the new alpha version of the GPT4ALL webui.
Join us as we dive into the features and functionalities of this new version.
[OPENING SCENE]
First, I want to thank all of you for your support. We have reached 2.1k stars on GitHub, and I hope you continue spreading the word about this tool to give it more visibility.
Host: (Enthusiastically) Ladies and gentlemen, the wait is finally over! Introducing GPT4All WebUI, the next-generation interface that allows seamless access to the power of GPT4All. With this alpha version, you'll have an extraordinary AI experience right at your fingertips!
If you haven't done so yet, and if you think this project is useful, please consider giving it a star; that helps a lot.
[FEATURES]
Before starting, let me tell you what this project is for. It aims to be a hub for all the LLM models people can use. You will be able to choose your preferred binding and
Host: Let's explore the incredible features GPT4All WebUI brings to the table.
User-Friendly Interface: We've prioritized user experience, ensuring that the WebUI is intuitive and easy to navigate. Whether you're an AI enthusiast, researcher, or a developer, you'll find it a breeze to work with.
Versatile Application Support: GPT4All WebUI is designed to support a wide range of applications. From natural language processing and text generation to code completion and translation, you can tap into the AI capabilities for various projects and tasks.
Customization Options: We understand that different users have different needs. That's why we've included customization options, allowing you to tailor GPT4All's performance to your specific requirements. Adjust parameters, fine-tune responses, and optimize outputs to suit your preferences.
Collaborative Environment: Collaboration is key, and GPT4All WebUI promotes it. Seamlessly collaborate with your team members, share projects, and work together in real-time to achieve the best results.
Data Security and Privacy: We prioritize the security and privacy of our users' data. GPT4All WebUI adheres to strict protocols, ensuring that your information remains confidential throughout the AI-powered journey.
[DYNAMIC DEMONSTRATION]
Host: Now, let's dive into a live demonstration to showcase the power of GPT4All WebUI. We'll walk you through a few exciting examples to illustrate its capabilities.
[DEMO 1: Text Generation]
Host: Suppose you're a writer seeking inspiration for your next novel. GPT4All WebUI can assist you in generating creative ideas and overcoming writer's block. Let's give it a try!
[Host interacts with GPT4All WebUI, entering a writing prompt or scenario]
Host: "Generate a thrilling opening paragraph for a sci-fi novel set in a distant future."
[GPT4All WebUI processes the input and generates a response]
Host: (Reading the generated response) "In the year 3045, as the stars shimmered against a black canvas, humanity teetered on the precipice of an interstellar conflict. Captain Alyssa Harper gazed out of her spaceship's window, knowing that her decisions would shape the destiny of both Earth and the unknown galaxies beyond."
Host: Wow, that's an incredible opening! The possibilities are endless when you harness the creative power of GPT4All WebUI.
[DEMO 2: Code Completion]
Host: Developers, pay close attention! GPT4All WebUI can be your programming companion, providing intelligent code completion. Let's see it in action.
[Host interacts with GPT4All WebUI, entering a partial code snippet]
Host: "Complete the code to sort an array of integers in ascending order using the quicksort algorithm."
[GPT4All WebUI processes the input and generates a response]
Host: (Examining the generated code) "Here
The new UI uses Vue.js for the entire frontend. The backend is Flask running on Python. Finally, data streaming uses the Websocket API.
To install
Now let's cut to the chase. Let's start by installing the tool

View File

@ -1,3 +1,3 @@
call ../env/Scripts/activate.bat
python install_backend.py %*
python install_binding.py %*
pause

View File

@ -1,4 +1,4 @@
#!/bin/bash
source ../env/Scripts/activate
python install_backend.py "$@"
python install_binding.py "$@"
read -p "Press any key to continue..."

View File

@ -1 +1 @@
GPT4All_GPTJ_backend : https://github.com/ParisNeo/GPT4All_GPTJ_backend
GPT4All_GPTJ_binding : https://github.com/ParisNeo/GPT4All_GPTJ_binding
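Each entry in this `bindinglist.yaml` maps a binding name to the Git repository that `install_binding.py` (next file) clones; an additional entry would follow the same shape (the second line is hypothetical):

```
GPT4All_GPTJ_binding : https://github.com/ParisNeo/GPT4All_GPTJ_binding
my_binding : https://github.com/<user>/<repo>
```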

View File

@ -5,32 +5,32 @@ import yaml
from pathlib import Path
def install_backend(backend_name):
# Load the list of available backends from backendlist.yaml
with open('backendlist.yaml', 'r') as f:
backend_list = yaml.safe_load(f)
def install_binding(binding_name):
# Load the list of available bindings from bindinglist.yaml
with open('bindinglist.yaml', 'r') as f:
binding_list = yaml.safe_load(f)
# Get the Github repository URL for the selected backend
# Get the Github repository URL for the selected binding
try:
backend_url = backend_list[backend_name]
binding_url = binding_list[binding_name]
except KeyError:
print(f"Backend '{backend_name}' not found in backendlist.yaml")
print(f"Binding '{binding_name}' not found in bindinglist.yaml")
return
# Clone the Github repository to a tmp folder
tmp_folder = Path('tmp')
if tmp_folder.exists():
shutil.rmtree(tmp_folder)
subprocess.run(['git', 'clone', backend_url, tmp_folder])
subprocess.run(['git', 'clone', binding_url, tmp_folder])
# Install the requirements.txt from the cloned project
requirements_file = tmp_folder / 'requirements.txt'
subprocess.run(['pip', 'install', '-r', str(requirements_file)])
# Copy the folder found inside the binding to ../backends
# Copy the folder found inside the binding to ../bindings
folders = [f for f in tmp_folder.iterdir() if f.is_dir() and not f.stem.startswith(".")]
src_folder = folders[0]
dst_folder = Path('../backends') / src_folder.stem
dst_folder = Path('../bindings') / src_folder.stem
print(f"copying from {src_folder} to {dst_folder}")
# Delete the destination directory if it already exists
if dst_folder.exists():
@ -41,20 +41,20 @@ def install_backend(backend_name):
# Create an empty folder in ../models with the same name
models_folder = Path('../models')
models_folder.mkdir(exist_ok=True)
(models_folder / backend_name).mkdir(exist_ok=True, parents=True)
(models_folder / binding_name).mkdir(exist_ok=True, parents=True)
if tmp_folder.exists():
shutil.rmtree(tmp_folder)
if __name__ == '__main__':
# Load the list of available backends from backendlist.yaml
with open('backendlist.yaml', 'r') as f:
backend_list = yaml.safe_load(f)
# Load the list of available bindings from bindinglist.yaml
with open('bindinglist.yaml', 'r') as f:
binding_list = yaml.safe_load(f)
# Print the list of available backends and prompt the user to select one
print("Available backends:")
for backend_id, backend_name in enumerate(backend_list):
print(f" {backend_id} - {backend_name}")
backend_id = int(input("Select a backend to install: "))
# Print the list of available bindings and prompt the user to select one
print("Available bindings:")
for binding_id, binding_name in enumerate(binding_list):
print(f" {binding_id} - {binding_name}")
binding_id = int(input("Select a binding to install: "))
install_backend(list(backend_list.keys())[backend_id])
install_binding(list(binding_list.keys())[binding_id])
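Launched through the `install_binding.bat`/`install_binding.sh` wrappers shown earlier, an interactive session would look roughly like this (output reconstructed from the prints above, with the single binding currently listed in `bindinglist.yaml`):

```
Available bindings:
 0 - GPT4All_GPTJ_binding
Select a binding to install: 0
```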

View File

@ -1,4 +1,4 @@
Here you can drop your models depending on the selected backend
Currently, supported backends are:
Here you can drop your models depending on the selected binding
Currently, supported bindings are:
- llamacpp
- gpt-j

View File

@ -20,7 +20,7 @@ setuptools.setup(
version="0.0.5",
author="Saifeddine ALOUI",
author_email="aloui.saifeddine@gmail.com",
description="A web ui for running chat models with different backends. Supports multiple personalities and extensions.",
description="A web ui for running chat models with different bindings. Supports multiple personalities and extensions.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/nomic-ai/gpt4all-ui",

View File

@ -13,7 +13,7 @@ fetch('/settings')
.then(response => response.text())
.then(html => {
document.getElementById('settings').innerHTML = html;
backendInput = document.getElementById('backend');
bindingInput = document.getElementById('binding');
modelInput = document.getElementById('model');
personalityLanguageInput = document.getElementById('personalities_language');
personalityCategoryInput = document.getElementById('personalities_category');
@ -43,7 +43,7 @@ fetch('/settings')
.then((data) => {
console.log("Received config")
console.log(data);
selectOptionByText(backendInput, data["backend"])
selectOptionByText(bindingInput, data["binding"])
selectOptionByText(modelInput, data["model"])
selectOptionByText(personalityLanguageInput, data["personality_language"])
selectOptionByText(personalityCategoryInput, data["personality_category"])
@ -74,25 +74,25 @@ fetch('/settings')
}
backendInput.addEventListener('input',() => {
console.log(`Backend (${backendInput.value})`)
bindingInput.addEventListener('input',() => {
console.log(`Binding (${bindingInput.value})`)
// Use fetch to send form values to Flask endpoint
fetch('/set_backend', {
fetch('/set_binding', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({"backend":backendInput.value}),
body: JSON.stringify({"binding":bindingInput.value}),
})
.then((response) => response.json())
.then((data) => {
console.log(data);
if(data["status"]==="no_models_found"){
alert("No models found for this backend. Make sure you select a backend that you have models for or download models from links in our repository")
alert("No models found for this binding. Make sure you select a binding that you have models for or download models from links in our repository")
}
else{
populate_settings();
alert("Backend set successfully")
alert("Binding set successfully")
}
})
.catch((error) => {
@ -115,7 +115,7 @@ fetch('/settings')
.then((data) => {
console.log(data);
populate_settings();
alert("Backend set successfully")
alert("Binding set successfully")
})
.catch((error) => {
console.error('Error:', error);
@ -159,7 +159,7 @@ fetch('/settings')
// Get form values and put them in an object
const formValues = {
seed: seedInput.value,
backend: backendInput.value,
binding: bindingInput.value,
model: modelInput.value,
personality_language:personalityLanguageInput.value,
personality_category:personalityCategoryInput.value,
@ -197,17 +197,17 @@ fetch('/settings')
function populate_settings(){
// Get a reference to the <select> element
const selectBackend = document.getElementById('backend');
const selectBinding = document.getElementById('binding');
const selectModel = document.getElementById('model');
const selectPersonalityLanguage = document.getElementById('personalities_language');
const selectPersonalityCategory = document.getElementById('personalities_category');
const selectPersonality = document.getElementById('personalities');
function populate_backends(){
selectBackend.innerHTML = "";
function populate_bindings(){
selectBinding.innerHTML = "";
// Fetch the list of .bin files from the models subfolder
fetch('/list_backends')
fetch('/list_bindings')
.then(response => response.json())
.then(data => {
if (Array.isArray(data)) {
@ -216,7 +216,7 @@ fetch('/settings')
const optionElement = document.createElement('option');
optionElement.value = filename;
optionElement.textContent = filename;
selectBackend.appendChild(optionElement);
selectBinding.appendChild(optionElement);
});
// fetch('/get_args')
@ -356,7 +356,7 @@ fetch('/settings')
});
populate_backends()
populate_bindings()
populate_models()
populate_personalities_languages()
populate_personalities_categories()

View File

@ -1,8 +1,8 @@
<div class="h-full overflow-y-auto">
<form id="model-params-form" class="bg-gray-50 dark:bg-gray-700 shadow-md rounded px-8 py-8 pt-6 pb-8 mb-4 text-black dark:text-white">
<div class="mb-4 flex-row">
<label class="font-bold" for="model">Backend</label>
<select class="bg-gray-200 dark:bg-gray-700 w-96 shadow appearance-none border rounded py-2 px-3 leading-tight focus:outline-none focus:shadow-outline" id="backend" name="backend">
<label class="font-bold" for="model">Binding</label>
<select class="bg-gray-200 dark:bg-gray-700 w-96 shadow appearance-none border rounded py-2 px-3 leading-tight focus:outline-none focus:shadow-outline" id="binding" name="binding">
</select>
</div>
<div class="mb-4 flex-row">

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

web/dist/index.html (vendored, 4 changed lines)
View File

@ -6,8 +6,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>GPT4All - WEBUI</title>
<script type="module" crossorigin src="/assets/index-2b959387.js"></script>
<link rel="stylesheet" href="/assets/index-1332cfe4.css">
<script type="module" crossorigin src="/assets/index-2548679d.js"></script>
<link rel="stylesheet" href="/assets/index-2bd2bbf7.css">
</head>
<body>
<div id="app"></div>

View File

@ -21,7 +21,7 @@
<div class="flex flex-shrink-0">
<b>Manual download:&nbsp;</b>
<a :href="path" @click.stop class="flex hover:text-secondary duration-75 active:scale-90"
title="Download this manually (faster) and put it in the models/<your backend> folder then refresh">
title="Download this manually (faster) and put it in the models/<your binding> folder then refresh">
<i data-feather="link" class="w-5 p-1"></i>
{{ title }}
</a>

View File

@ -54,8 +54,8 @@
},
methods: {
commitChanges() {
// Send the modified personality to the backend
// Implement your backend integration here
// Send the modified personality to the binding
// Implement your binding integration here
console.log('Personality changes committed');
this.editMode = false;
}

View File

@ -525,7 +525,7 @@ export default {
}
},
sendMsg(msg) {
// Sends message to backend
// Sends message to binding
this.isGenerating = true;
this.setDiscussionLoading(this.currentDiscussion.id, this.isGenerating);
axios.get('/get_generation_status', {}).then((res) => {
@ -554,7 +554,7 @@ export default {
});
},
streamMessageContent(msgObj) {
// Streams response message content from backend
// Streams response message content from binding
//console.log("stream", JSON.stringify(content))
const parent = msgObj.user_message_id
const discussion_id = msgObj.discussion_id
@ -590,7 +590,7 @@ export default {
},
async createNewDiscussion() {
// Creates new discussion on backend,
// Creates new discussion on binding,
// gets new discussion list, selects
// newly created discussion,
// scrolls to the discussion
@ -619,7 +619,7 @@ export default {
}
},
async deleteDiscussion(id) {
// Deletes discussion from backend and frontend
// Deletes discussion from binding and frontend
//const index = this.list.findIndex((x) => x.id == id)
//const discussionItem = this.list[index]

View File

@ -80,13 +80,13 @@
</div>
<div :class="{ 'hidden': mzc_collapsed }" class="flex flex-col mb-2 px-3 pb-0">
<div class="mx-2 mb-4">
<label for="backend" class="block mb-2 text-sm font-medium text-gray-900 dark:text-white">
Backend: ({{ backendsArr.length }})
<label for="binding" class="block mb-2 text-sm font-medium text-gray-900 dark:text-white">
Binding: ({{ bindingsArr.length }})
</label>
<select id="backend" @change="update_backend($event.target.value)"
<select id="binding" @change="update_binding($event.target.value)"
class="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500">
<option v-for="item in backendsArr" :selected="item === configFile.backend">{{ item }}</option>
<option v-for="item in bindingsArr" :selected="item === configFile.binding">{{ item }}</option>
</select>
</div>
<div v-if="models.length > 0" class="mb-2">
@ -420,7 +420,7 @@ export default {
mzl_collapsed: false,
pzl_collapsed: false,
// Settings stuff
backendsArr: [],
bindingsArr: [],
modelsArr: [], // not used anymore but still have references in some methods
persLangArr: [],
persCatgArr: [],
@ -613,7 +613,7 @@ export default {
// No need to refresh all lists because they never change while using the application.
// On settings change, only the config file changes.
//
//this.api_get_req("list_backends").then(response => { this.backendsArr = response })
//this.api_get_req("list_bindings").then(response => { this.bindingsArr = response })
this.api_get_req("list_models").then(response => { this.modelsArr = response })
//this.api_get_req("list_personalities_languages").then(response => { this.persLangArr = response })
this.api_get_req("list_personalities_categories").then(response => { this.persCatgArr = response })
@ -660,23 +660,23 @@ export default {
// eslint-disable-next-line no-unused-vars
.catch(error => { return { 'status': false } });
},
update_backend(value) {
update_binding(value) {
console.log("Upgrading backend")
console.log("Upgrading binding")
// eslint-disable-next-line no-unused-vars
this.isLoading = true
this.update_setting('backend', value, (res) => {
this.update_setting('binding', value, (res) => {
this.refresh();
console.log("Backend changed");
console.log("Binding changed");
console.log(res);
this.$refs.toast.showToast("Backend changed.", 4, true)
this.$refs.toast.showToast("Binding changed.", 4, true)
this.settingsChanged = true
this.isLoading = false
nextTick(() => {
feather.replace()
})
// If backend changes then reset model
// If binding changes then reset model
this.update_model(null)
})
@ -834,7 +834,7 @@ export default {
this.isModelSelected = true
}
this.fetchModels();
this.backendsArr = await this.api_get_req("list_backends")
this.bindingsArr = await this.api_get_req("list_bindings")
this.modelsArr = await this.api_get_req("list_models")
this.persLangArr = await this.api_get_req("list_personalities_languages")
this.persCatgArr = await this.api_get_req("list_personalities_categories")