Merge branch 'main' into pr/197

Saifeddine ALOUI 2023-05-18 13:16:25 +02:00
commit 323f509a57
24 changed files with 475 additions and 185 deletions

View File

@@ -0,0 +1,106 @@
######
# Project : GPT4ALL-UI
# File : backend.py
# Author : ParisNeo with the help of the community
# Underlying backend : marella's ctransformers backend
# Supported by Nomic-AI
# Licence : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This backend is a wrapper around marella's ctransformers backend.
# Check out his GitHub project : https://github.com/marella/ctransformers
######
from pathlib import Path
from typing import Callable
from gpt4all_api.backend import GPTBackend
import yaml
from ctransformers import AutoModelForCausalLM
__author__ = "parisneo"
__github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
backend_name = "GPTJ"
class GPTJ(GPTBackend):
    file_extension='*.bin'
    def __init__(self, config:dict) -> None:
        """Builds a ctransformers backend

        Args:
            config (dict): The configuration file
        """
        super().__init__(config, False)
        if 'gpt2' in self.config['model']:
            model_type='gpt2'
        elif 'gptj' in self.config['model']:
            model_type='gptj'
        elif 'gpt_neox' in self.config['model']:
            model_type='gpt_neox'
        elif 'dolly-v2' in self.config['model']:
            model_type='dolly-v2'
        elif 'starcoder' in self.config['model']:
            model_type='starcoder'
        else:
            print("The model you are using is not supported by this backend")
            return

        if self.config["use_avx2"]:
            self.model = AutoModelForCausalLM.from_pretrained(
                    f"./models/c_transformers/{self.config['model']}", model_type=model_type
                    )
        else:
            self.model = AutoModelForCausalLM.from_pretrained(
                    f"./models/c_transformers/{self.config['model']}", model_type=model_type, lib = "avx"
                    )

    def generate(self,
                 prompt:str,
                 n_predict: int = 128,
                 new_text_callback: Callable[[str], None] = bool,
                 verbose: bool = False,
                 **gpt_params ):
        """Generates text out of a prompt

        Args:
            prompt (str): The prompt to use for generation
            n_predict (int, optional): Number of tokens to predict. Defaults to 128.
            new_text_callback (Callable[[str], None], optional): A callback function that is called every time a new text element is generated. Defaults to bool (a no-op callback that discards the text).
            verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
        """
        try:
            self.model.reset()
            tokens = self.model.tokenize(prompt.encode())
            for tok in self.model.generate(
                    tokens,
                    seed=self.config['seed'],
                    n_threads=self.config['n_threads'],
                    n_predict=n_predict,
                    top_k=self.config['top_k'],
                    top_p=self.config['top_p'],
                    temp=self.config['temperature'],
                    repeat_penalty=self.config['repeat_penalty'],
                    repeat_last_n=self.config['repeat_last_n'],
                    n_batch=8,
                    reset=True,
                    ):
                if not new_text_callback(self.model.detokenize(tok)):
                    return
        except Exception as ex:
            print(ex)

    @staticmethod
    def get_available_models():
        # Create the file path relative to the child class's directory
        backend_path = Path(__file__).parent
        file_path = backend_path/"models.yaml"

        with open(file_path, 'r') as file:
            yaml_data = yaml.safe_load(file)

        return yaml_data
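For context, a minimal usage sketch of this backend (not part of the commit; the model filename is hypothetical, and the config keys mirror the ones read above):

# Usage sketch (illustrative only).
config = {
    "model": "my-gptj-model.bin",   # hypothetical; must contain 'gptj' for the model_type detection above
    "use_avx2": True,
    "seed": -1,
    "n_threads": 8,
    "top_k": 40,
    "top_p": 0.9,
    "temperature": 0.7,
    "repeat_penalty": 1.1,
    "repeat_last_n": 64,
}
backend = GPTJ(config)

def on_token(text: str) -> bool:
    print(text, end="", flush=True)
    return True   # returning a falsy value stops generation

backend.generate("Once upon a time", n_predict=64, new_text_callback=on_token)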

View File

@@ -0,0 +1,7 @@
- bestGPTJ: 'true'
  description: Current best commercially licensable model based on GPT-J and trained
    by Nomic AI on the latest curated GPT4All dataset.
  filename: ggml-gpt4all-j-v1.3-groovy.bin
  filesize: '3785248281'
  isDefault: 'true'
  md5sum: 81a09a0ddf89690372fc296ff7f625af
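The filesize and md5sum fields make it possible to verify a download against its models.yaml entry; a minimal sketch of such a check (not part of the commit, using the values above):

import hashlib
from pathlib import Path

def verify_model(path: Path, expected_md5: str, expected_size: int) -> bool:
    """Check a downloaded model file against a models.yaml entry."""
    if path.stat().st_size != expected_size:
        return False
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_md5

# verify_model(Path("models/c_transformers/ggml-gpt4all-j-v1.3-groovy.bin"),
#              "81a09a0ddf89690372fc296ff7f625af", 3785248281)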

View File

@@ -6,6 +6,11 @@
# Licence : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This backend is a wrapper around gpt4all's official backend.
# Check out the project on GitHub : https://github.com/nomic-ai/gpt4all
######
from pathlib import Path
from typing import Callable

View File

@@ -7,6 +7,10 @@
# Licence : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This backend is a wrapper around abdeladim's pygptj backend.
# Check out his GitHub project : https://github.com/abdeladim-s/pygptj
######
from pathlib import Path
from typing import Callable

View File

@@ -7,6 +7,10 @@
# Licence : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This backend is a wrapper around marella's gpt4all-j backend.
# Check out his GitHub project : https://github.com/marella/gpt4all-j
######
from pathlib import Path
from typing import Callable

View File

@@ -1,72 +0,0 @@
- bestGPTJ: 'true'
  description: Current best commercially licensable model based on GPT-J and trained
    by Nomic AI on the latest curated GPT4All dataset.
  filename: ggml-gpt4all-j-v1.3-groovy.bin
  filesize: '3785248281'
  isDefault: 'true'
  md5sum: 81a09a0ddf89690372fc296ff7f625af
- bestLlama: 'true'
  description: Current best non-commercially licensable model based on Llama 13b and
    trained by Nomic AI on the latest curated GPT4All dataset.
  filename: ggml-gpt4all-l13b-snoozy.bin
  filesize: '8136770688'
  md5sum: 91f886b68fbce697e9a3cd501951e455
- bestMPT: 'true'
  description: Current best non-commercially licensable chat model based on MPT and
    trained by Mosaic ML.
  filename: ggml-mpt-7b-chat.bin
  filesize: '4854401050'
  isDefault: 'true'
  md5sum: 756249d3d6abe23bde3b1ae272628640
  requires: 2.4.1
- description: A commercially licensable model based on GPT-J and trained by Nomic
    AI on the v2 GPT4All dataset.
  filename: ggml-gpt4all-j-v1.2-jazzy.bin
  filesize: '3785248281'
  md5sum: 879344aaa9d62fdccbda0be7a09e7976
- description: A commercially licensable model based on GPT-J and trained by Nomic
    AI on the v1 GPT4All dataset.
  filename: ggml-gpt4all-j-v1.1-breezy.bin
  filesize: '3785248281'
  md5sum: 61d48a82cb188cceb14ebb8082bfec37
- description: A commercially licensable model based on GPT-J and trained by Nomic
    AI on the v0 GPT4All dataset.
  filename: ggml-gpt4all-j.bin
  filesize: '3785248281'
  md5sum: 5b5a3f9b858d33b29b52b89692415595
- description: A non-commercially licensable model based on Llama 7b and trained by
    teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
  filename: ggml-vicuna-7b-1.1-q4_2.bin
  filesize: '4212859520'
  md5sum: 29119f8fa11712704c6b22ac5ab792ea
- description: A non-commercially licensable model based on Llama 13b and trained
    by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
  filename: ggml-vicuna-13b-1.1-q4_2.bin
  filesize: '8136770688'
  md5sum: 95999b7b0699e2070af63bf5d34101a8
- description: A non-commercially licensable model based on Llama 7b and trained by
    Microsoft and Peking University.
  filename: ggml-wizardLM-7B.q4_2.bin
  filesize: '4212864640'
  md5sum: 99e6d129745a3f1fb1121abed747b05a
- description: A non-commercially licensable model based on Llama 13b and RLHF trained
    by Stable AI.
  filename: ggml-stable-vicuna-13B.q4_2.bin
  filesize: '8136777088'
  md5sum: 6cb4ee297537c9133bddab9692879de0
- description: A commercially licensable model base pre-trained by Mosaic ML.
  filename: ggml-mpt-7b-base.bin
  filesize: '4854401028'
  md5sum: 120c32a51d020066288df045ef5d52b9
  requires: 2.4.1
- description: A non-commercially licensable model based on Vicuna 13b, fine-tuned
    on ~180,000 instructions, trained by Nous Research.
  filename: ggml-nous-gpt4-vicuna-13b.bin
  filesize: '8136777088'
  md5sum: d5eafd5b0bd0d615cfd5fd763f642dfe
- description: A commercially licensable instruct model based on MPT and trained
    by Mosaic ML.
  filename: ggml-mpt-7b-instruct.bin
  filesize: '4854401028'
  md5sum: 1cfa4958f489f0a0d1ffdf6b37322809
  requires: 2.4.1

View File

@@ -1,47 +0,0 @@
- bestLlama: 'false'
  description: The model that started it all
  filename: gpt4all-lora-quantized-ggml.new.bin
  md5sum: 91f886b68fbce697e9a3cd501951e455
  server: https://huggingface.co/ParisNeo/GPT4All/resolve/main/
- bestLlama: 'false'
  description: The model that started it all (uncensored version)
  filename: gpt4all-lora-unfiltered-quantized.new.bin
  md5sum: 91f886b68fbce697e9a3cd501951e455
  server: https://huggingface.co/ParisNeo/GPT4All/resolve/main/
- bestLlama: 'true'
  description: Current best non-commercially licensable model based on Llama 13b and
    trained by Nomic AI on the latest curated GPT4All dataset.
  filename: ggml-gpt4all-l13b-snoozy.bin
  filesize: '8136770688'
  md5sum: 91f886b68fbce697e9a3cd501951e455
  server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 7b and trained by
    teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
  filename: ggml-vicuna-7b-1.1-q4_2.bin
  filesize: '4212859520'
  md5sum: 29119f8fa11712704c6b22ac5ab792ea
  server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 13b and trained
    by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
  filename: ggml-vicuna-13b-1.1-q4_2.bin
  filesize: '8136770688'
  md5sum: 95999b7b0699e2070af63bf5d34101a8
  server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 7b and trained by
    Microsoft and Peking University.
  filename: ggml-wizardLM-7B.q4_2.bin
  filesize: '4212864640'
  md5sum: 99e6d129745a3f1fb1121abed747b05a
  server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 13b and RLHF trained
    by Stable AI.
  filename: ggml-stable-vicuna-13B.q4_2.bin
  filesize: '8136777088'
  md5sum: 6cb4ee297537c9133bddab9692879de0
  server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Vicuna 13b, fine-tuned
    on ~180,000 instructions, trained by Nous Research.
  filename: ggml-nous-gpt4-vicuna-13b.bin
  filesize: '8136777088'
  md5sum: d5eafd5b0bd0d615cfd5fd763f642dfe
  server: https://gpt4all.io/models/

View File

@@ -6,38 +6,39 @@
# Licence : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This backend is a wrapper around the official llama-cpp-python bindings.
# Check out the project on GitHub : https://github.com/abetlen/llama-cpp-python
######
from pathlib import Path
from typing import Callable
from accelerate import init_empty_weights
from accelerate import load_checkpoint_and_dispatch
from transformers import AutoTokenizer
from transformers import AutoConfig, AutoModelForCausalLM
from llama_cpp import Llama
from gpt4all_api.backend import GPTBackend
import torch
import yaml
import random
__author__ = "parisneo"
__github__ = "https://github.com/ParisNeo/GPTQ_backend"
__github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
backend_name = "HuggingFace"
backend_name = "LLAMACPP"
class HuggingFace(GPTBackend):
    file_extension='*'
class LLAMACPP(GPTBackend):
    file_extension='*.bin'
    def __init__(self, config:dict) -> None:
        """Builds a HuggingFace backend
        """Builds a LLAMACPP backend

        Args:
            config (dict): The configuration file
        """
        super().__init__(config, False)

        # load quantized model, currently only support cpu or single gpu
        config_path = AutoConfig.from_pretrained(config["model"])
        self.tokenizer = AutoTokenizer.from_pretrained(config["model"])
        self.model = AutoModelForCausalLM.from_pretrained(config["model"], load_in_8bit=True, device_map='auto')
        seed = config["seed"]
        if seed <= 0:
            seed = random.randint(1, 2**31)

        self.model = Llama(model_path=f"./models/llama_cpp_official/{self.config['model']}", n_gpu_layers=40, seed=seed)
def generate(self,
prompt:str,
@@ -54,30 +55,31 @@ class HuggingFace(GPTBackend):
            verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
        """
        try:
            tok = self.tokenizer.decode(self.model.generate(**self.tokenizer(prompt, return_tensors="pt").to("cuda:0"))[0])
            new_text_callback(tok)
            """
            self.model.reset()
            for tok in self.model.generate(prompt,
                n_predict=n_predict,
                temp=self.config['temp'],
            tokens = self.model.tokenize(prompt.encode())
            count = 0
            for tok in self.model.generate(tokens,
                temp=self.config['temperature'],
                top_k=self.config['top_k'],
                top_p=self.config['top_p'],
                repeat_penalty=self.config['repeat_penalty'],
                repeat_last_n = self.config['repeat_last_n'],
                n_threads=self.config['n_threads'],
                ):
                if not new_text_callback(tok):
                if count >= n_predict or (tok == self.model.token_eos()):
                    break
                word = self.model.detokenize([tok]).decode()
                if not new_text_callback(word):
                    return
            """
                count += 1
        except Exception as ex:
            print(ex)
    @staticmethod
    def list_models(config:dict):
        """Lists the models for this backend
        """
    def get_available_models():
        # Create the file path relative to the child class's directory
        backend_path = Path(__file__).parent
        file_path = backend_path/"models.yaml"

        with open(file_path, 'r') as file:
            yaml_data = yaml.safe_load(file)

        return [
            "EleutherAI/gpt-j-6B"
        ]
        return yaml_data
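The rewritten generate loop streams one detokenized word per callback and stops when the callback returns a falsy value, when n_predict tokens have been produced, or when the end-of-sequence token appears. A caller-side sketch of that callback contract (illustrative, not part of the commit):

# Collect streamed words and stop after roughly 500 characters.
collected = []

def capped_callback(word: str) -> bool:
    collected.append(word)
    # A falsy return makes generate() stop streaming.
    return sum(len(w) for w in collected) < 500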

View File

@@ -0,0 +1,29 @@
- bestLlama: 'true'
  license: Non commercial
  description: The official open assistant 30B model finally here
  filename: OpenAssistant-SFT-7-Llama-30B.ggml.q4_0.bin
  sha256: 32fd44c685fbf429810db593e2db8aa42a7e1be2cd3571b6005d53b029acfcf5
  server: https://huggingface.co/TheBloke/OpenAssistant-SFT-7-Llama-30B-GGML/resolve/main/
- bestLlama: 'true'
  license: Non commercial
  description: The wizardVicuna model 13B
  filename: wizard-vicuna-13B.ggml.q4_0.bin
  sha256: 32fd44c685fbf429810db593e2db8aa42a7e1be2cd3571b6005d53b029acfcf5
  server: https://huggingface.co/TheBloke/wizard-vicuna-13B-GGML/resolve/main/
- bestLlama: 'true'
  license: Non commercial
  description: The wizardLM model 7B
  filename: WizardLM-7B-uncensored.ggml.q4_0.bin
  sha256: b1e53a3c3a9389b9c5d81e0813cfb90ebaff6acad1733fad08cd28974fa3ac30
  server: https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGML/resolve/main/
- bestLlama: 'true'
  license: Non commercial
  description: The wizardLM model uncensored
  filename: WizardLM-7B-uncensored.ggml.q4_0.bin
  md5sum: b1e53a3c3a9389b9c5d81e0813cfb90ebaff6acad1733fad08cd28974fa3ac30
  server: https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGML/resolve/main/

View File

@@ -0,0 +1 @@
llama-cpp-python

View File

@@ -6,6 +6,10 @@
# Licence : Apache 2.0
# Description :
# This is an interface class for GPT4All-ui backends.
# This backend is a wrapper around abdeladim's pyllamacpp backend.
# Check out his GitHub project : https://github.com/abdeladim-s/pyllamacpp
######
from pathlib import Path
from typing import Callable
@@ -18,9 +22,9 @@ __github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"

backend_name = "LLAMACPP"
backend_name = "PyLLAMACPP"

class LLAMACPP(GPTBackend):
class PyLLAMACPP(GPTBackend):
    file_extension='*.bin'
    def __init__(self, config:dict) -> None:
        """Builds a LLAMACPP backend
@@ -31,7 +35,7 @@ class LLAMACPP(GPTBackend):
        super().__init__(config, False)
        self.model = Model(
            model_path=f"./models/llama_cpp/{self.config['model']}",
            model_path=f"./models/py_llama_cpp/{self.config['model']}",
            prompt_context="", prompt_prefix="", prompt_suffix="",
            n_ctx=self.config['ctx_size'],
            seed=self.config['seed'],

View File

@@ -0,0 +1,11 @@
- bestLlama: 'true'
  description: The official open assistant 30B model finally here
  filename: OpenAssistant-SFT-7-Llama-30B.ggml.q4_0.bin
  md5sum: 91f886b68fbce697e9a3cd501951e455
  server: https://huggingface.co/TheBloke/OpenAssistant-SFT-7-Llama-30B-GGML/resolve/main/
- bestLlama: 'true'
  description: Stable vicuna 13B
  filename: stable-vicuna-13B.ggml.q5_1.bin
  md5sum: 91f886b68fbce697e9a3cd501951e455
  server: https://huggingface.co/TheBloke/stable-vicuna-13B-GGML/resolve/main/

View File

@@ -0,0 +1 @@
pyllamacpp

View File

@@ -7,7 +7,7 @@ n_threads: 8
host: localhost
language: en-US
# Supported backends are llamacpp and gpt-j
backend: llama_cpp
backend: llama_cpp_official
model: null
n_predict: 1024
nb_messages_to_remember: 5
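Note that the backend name doubles as a directory name: judging from the paths used elsewhere in this diff, `backends/<backend>` holds the backend module (plus its models.yaml and optional requirements.txt), and `models/<backend>` holds the downloaded model binaries. A sketch of that convention (an assumption drawn from the code, not stated explicitly in the commit):

from pathlib import Path

backend_name = "llama_cpp_official"
backend_dir = Path("backends") / backend_name   # backend code, models.yaml, requirements.txt
models_dir = Path("models") / backend_name      # downloaded .bin model files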

View File

@@ -36,27 +36,29 @@ This will update `/dist/` folder with all the files. Also the build will show yo
Here we keep track of things to implement and stuff we need to do.
## Todo's
- Add ability to select multiple discussions to export or delete [WIP]
- Add ability to select multiple discussions to export [WIP]
- Add toast messages for errors and successes
- Populate settings with settings controls [WIP]
- Connect Settings to backend, ability to save changes
- Add DB switcher (I'm thinking in the settings view)
- Make the UI work well on mobile
- Scroll to bottom
- Scroll to top
- Need to fix colors for `<input />` fields
- Create status bar for backend to display if something is generating on the backend
- Create stop generating button
- Fix the generated message formatting - add line breaks, also for user input messages.
- Add ability for users to style the whole UI, either changing Hue or changing every color manually.
- Maybe try to set the chatbox to float to the bottom (always on the bottom of the screen)
- Create a panel in the Settings tab to create new personalities
- Need to fix: when the user inputs a message it should show up in the discussion array, then a new message should be added for the bot that is typing.
- Need to investigate performance of websocket when message is being streamed back to the UI
- On first launch of the UI force users to create "User" personality, to be used as "User" for any or all input messages.
- Connect delete / export discussions to backend functions.
- Need to fix when deleting multiple discussions to not lose the loading animation for each discussion when the list gets updated
- Need to add loading feedback for when a new discussion is being created
- Add drag-n-drop of files into messages; images get parsed as images, with the ability to load images from messages in the DB.
- Send files to backend - images, other files for parsing data.
- Ability to reorder Discussions, add tags, categories
- Export whole DB
- Reset whole DB
- Add text to speech to messages and chatbox
- Add indicator to messages when a non-commercial model was used.
- Ability to export only messages that are for commercial use or that come from open-source models.
- Ability to hide non-commercial model messages.
- Feature - bot council where you add multiple bots to a discussion, give them a topic and maybe a max message count, and they ponder the topic then summarize it all.
- Feature - voice chat with bot and voice output from bot - whisper + bard?
- Feature - under the selected discussion show more options to select and add more bots, select model per discussion or per bot, tweak settings per bot or per model.
- Easily share a personality via export to a file, then drag and drop it onto the WebUI and you're done.
## Done
@@ -66,4 +68,16 @@ Here we keep track of things to implement and stuff we need to do.
- Add clear filter button to search input field [DONE]
- Add modal to ask user if you sure about to delete [DONE but in different way]
- Fix up the discussion array to filter out the messages by type not by count. (conditionner and )[DONE]
- Add title of current discussion to page [DONE]
- Add title of current discussion to page [DONE]
- Populate settings with settings controls [DONE]
- Connect Settings to backend, ability to save changes [DONE]
- Scroll to bottom [SCROLLBAR]
- Scroll to top [SCROLLBAR]
- Create stop generating button [DONE]
- Fix the generated message formatting - add line breaks, also for user input messages. [DONE]
- Maybe try to set the chatbox to float to the bottom (always on the bottom of the screen) [DONE]
- Need to fix: when the user inputs a message it should show up in the discussion array, then a new message should be added for the bot that is typing. [DONE]
- Connect delete / export discussions to backend functions.[DONE]
- Need to fix when deleting multiple discussions to not lose the loading animation for each discussion when the list gets updated [DONE]
- Need to add loading feedback for when a new discussion is being created [DONE]
- Add ability to select multiple discussions to delete [DONE]

View File

@@ -28,6 +28,58 @@ __license__ = "Apache 2.0"
import subprocess
import pkg_resources

# ===========================================================
# Manage automatic install scripts

def is_package_installed(package_name):
    try:
        dist = pkg_resources.get_distribution(package_name)
        return True
    except pkg_resources.DistributionNotFound:
        return False


def install_package(package_name):
    try:
        # Check if the package is already installed
        __import__(package_name)
        print(f"{package_name} is already installed.")
    except ImportError:
        print(f"{package_name} is not installed. Installing...")

        # Install the package using pip
        subprocess.check_call(["pip", "install", package_name])

        print(f"{package_name} has been successfully installed.")


def parse_requirements_file(requirements_path):
    with open(requirements_path, 'r') as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                # Skip empty and commented lines
                continue
            # Split on the first version specifier, keeping the operator so
            # that pinned versions are passed through to pip intact.
            for separator in ('==', '>='):
                if separator in line:
                    package_name, _, version = line.partition(separator)
                    version_specifier = separator + version
                    break
            else:
                package_name, version_specifier = line, ''
            if is_package_installed(package_name):
                # The package is already installed
                print(f"{package_name} is already installed.")
            else:
                # The package is not installed, install it
                if version_specifier:
                    install_package(f"{package_name}{version_specifier}")
                else:
                    install_package(package_name)
# ===========================================================
class ModelProcess:
    def __init__(self, config=None):
        self.config = config
@@ -42,7 +94,11 @@ class ModelProcess:
        self.model_ready = mp.Value('i', 0)
        self.ready = False

    def load_backend(self, backend_path):
    def load_backend(self, backend_path:Path):
        # first find out if there is a requirements.txt file
        requirements_file = backend_path/"requirements.txt"
        if requirements_file.exists():
            parse_requirements_file(requirements_file)

        # define the full absolute path to the module
        absolute_path = backend_path.resolve()
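The hunk ends just after resolving the absolute path; a sketch of how the dynamic import presumably continues (an assumption, since the remainder of load_backend is not shown in this diff, and the entry-file name is hypothetical):

from pathlib import Path
import importlib.util

def _load_backend_module(absolute_path: Path):
    # Hypothetical: load the backend package's entry file as a module.
    module_file = absolute_path / "__init__.py"
    spec = importlib.util.spec_from_file_location(absolute_path.name, module_file)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module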
@@ -88,6 +144,7 @@ class ModelProcess:
    def rebuild_backend(self, config):
        try:
            backend = self.load_backend(Path("backends")/config["backend"])
            print("Backend loaded successfully")
        except Exception as ex:
@@ -106,7 +163,7 @@
            print(f"Loading model : {model_file}")
            self.model = self.backend(self.config)
            self.model_ready.value = 1
            print("Model created successfully")
            print("Model created successfully\ntesting the model, please wait ...")
        except Exception as ex:
            print("Couldn't build model")
            print(ex)
@@ -239,6 +296,7 @@ class ModelProcess:
        while not self.set_config_queue.empty():
            config = self.set_config_queue.get()
            if config is not None:
                print("Inference process : Setting configuration")
                self._set_config(config)

    def _cancel_generation(self):
@@ -472,6 +530,8 @@ class GPT4AllAPI():
            )
            self.current_ai_message_id = message_id
        else:
            message_id = 0
        return message_id

    def prepare_reception(self):
@@ -492,12 +552,23 @@
        messages = self.current_discussion.get_messages()
        self.full_message_list = []
        for message in messages:
            if message["id"]<= message_id or message_id==-1:
            if message["id"]< message_id or message_id==-1:
                if message["type"]==self.db.MSG_TYPE_NORMAL:
                    if message["sender"]==self.personality.name:
                        self.full_message_list.append(self.personality.ai_message_prefix+message["content"])
                    else:
                        self.full_message_list.append(self.personality.user_message_prefix + message["content"])
            else:
                break

        if self.personality.processor is not None:
            preprocessed_prompt = self.personality.processor.process_model_input(message["content"])
        else:
            preprocessed_prompt = message["content"]
        if preprocessed_prompt is not None:
            self.full_message_list.append(self.personality.user_message_prefix+preprocessed_prompt+self.personality.link_text+self.personality.ai_message_prefix)
        else:
            # Fall back to the raw message content when the processor returns nothing
            self.full_message_list.append(self.personality.user_message_prefix+message["content"]+self.personality.link_text+self.personality.ai_message_prefix)

        link_text = self.personality.link_text
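The hunk cuts off here; presumably the accumulated message list is then joined with the personality's link_text to form the final prompt string, along the lines of this one-line sketch (an assumption, not shown in the diff):

# Assumption: join the accumulated messages into a single prompt string.
discussion_messages = link_text.join(self.full_message_list)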

View File

View File

@@ -4,7 +4,6 @@ nomic
pytest
pyyaml
markdown
pyllamacpp==2.1.1
gpt4all-j
pygptj
gpt4all
@@ -16,4 +15,5 @@ transformers
accelerate
gevent
gevent-websocket
pyaipersonality>=0.0.11
pyaipersonality>=0.0.12
ctransformers

File diff suppressed because one or more lines are too long

web/dist/assets/index-9b4a79fd.js vendored Normal file

File diff suppressed because one or more lines are too long

web/dist/assets/index-e783286d.css vendored Normal file

File diff suppressed because one or more lines are too long

web/dist/index.html vendored
View File

@@ -6,8 +6,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>GPT4All - WEBUI</title>
<script type="module" crossorigin src="/assets/index-536b56d7.js"></script>
<link rel="stylesheet" href="/assets/index-8f995011.css">
<script type="module" crossorigin src="/assets/index-9b4a79fd.js"></script>
<link rel="stylesheet" href="/assets/index-e783286d.css">
</head>
<body>
<div id="app"></div>

View File

@@ -13,6 +13,7 @@
<i data-feather="check"></i>
</button>
</div>
<!-- SAVE AND RESET -->
<div v-if="!showConfirmation" class="flex gap-3 flex-1 items-center ">
@@ -24,6 +25,10 @@
@click="reset_configuration()">
<i data-feather="refresh-ccw"></i>
</button>
<button class="text-2xl hover:text-secondary duration-75 active:scale-90" title="Collapse / Expand all panels"
type="button" @click.stop="all_collapsed=!all_collapsed">
<i data-feather="list"></i>
</button>
</div>
</div>
@@ -68,23 +73,23 @@
</div>
</div>
<div
class="flex flex-col mb-2 p-3 rounded-lg bg-bg-light-tone dark:bg-bg-dark-tone hover:bg-bg-light-tone-panel hover:dark:bg-bg-dark-tone-panel duration-150 shadow-lg">
<div class="flex flex-row ">
class="flex flex-col mb-2 rounded-lg bg-bg-light-tone dark:bg-bg-dark-tone hover:bg-bg-light-tone-panel hover:dark:bg-bg-dark-tone-panel duration-150 shadow-lg">
<div class="flex flex-row p-3">
<button @click.stop="mzc_collapsed = !mzc_collapsed"
class="text-2xl hover:text-primary duration-75 p-2 -m-2 w-full text-left active:translate-y-1">
<!-- <i data-feather="chevron-right"></i> -->
<h3 class="text-lg font-semibold cursor-pointer select-none "
@click.stop="mzc_collapsed = !mzc_collapsed">
Models zoo</h3>
</button>
</div>
<div :class="{ 'hidden': mzc_collapsed }" class="flex flex-col mb-2 p-2">
<div v-if="models.length > 0" class="my-2">
<div :class="{ 'hidden': mzc_collapsed }" class="flex flex-col mb-2 px-3 pb-0">
<div v-if="models.length > 0" class="mb-2">
<label for="model" class="block ml-2 mb-2 text-sm font-medium text-gray-900 dark:text-white">
Install more models:
</label>
<div class="overflow-y-auto max-h-96 no-scrollbar p-2">
<div ref="modelZoo" class="overflow-y-auto no-scrollbar p-2 pb-0" :class="mzl_collapsed ? '':'max-h-96'">
<model-entry v-for="(model, index) in models" :key="index" :title="model.title" :icon="model.icon"
:path="model.path" :description="model.description" :is-installed="model.isInstalled"
:on-install="onInstall"
@@ -92,7 +97,17 @@
:on-selected="onSelected" />
</div>
</div>
<!-- EXPAND / COLLAPSE BUTTON -->
<button v-if="mzl_collapsed" class="text-2xl hover:text-secondary duration-75 flex justify-center hover:bg-bg-light-tone hover:dark:bg-bg-dark-tone rounded-lg " title="Collapse"
type="button" @click="mzl_collapsed = !mzl_collapsed">
<i data-feather="chevron-up" ></i>
</button>
<button v-else class="text-2xl hover:text-secondary duration-75 flex justify-center hover:bg-bg-light-tone hover:dark:bg-bg-dark-tone rounded-lg " title="Expand"
type="button" @click="mzl_collapsed = !mzl_collapsed">
<i data-feather="chevron-down" ></i>
</button>
</div>
</div>
<!-- PERSONALITY -->
<div
@@ -361,12 +376,16 @@ export default {
            // Models zoo installer stuff
            models: [],
            personalities:[],
            // Accordion stuff
            // Accordion stuff
            collapsedArr:[],
            all_collapsed:true,
            bec_collapsed: true,
            mzc_collapsed: true, // models zoo
            pzc_collapsed: true, // personalities zoo
            pc_collapsed: true,
            mc_collapsed: true,
            // Zoo accordion
            mzl_collapsed:false,
            // Settings stuff
            backendsArr: [],
            modelsArr: [],
@@ -382,6 +401,13 @@
    created() {
        this.fetchModels();
    }, methods: {
        collapseAll(val){
            this.bec_collapsed=val
            this.mzc_collapsed=val
            this.pzc_collapsed=val
            this.pc_collapsed=val
            this.mc_collapsed=val
        },
        fetchModels() {
            axios.get('/get_available_models')
                .then(response => {
@@ -460,14 +486,16 @@
        },

        // Refresh stuff
        refresh() {
            console.log("Refreshing")
            // No need to refresh all lists because they never change while the application is running.
            // On settings change, only the config file changes.
            //
            //this.api_get_req("list_backends").then(response => { this.backendsArr = response })
            //this.api_get_req("list_models").then(response => { this.modelsArr = response })
            this.api_get_req("list_models").then(response => { this.modelsArr = response })
            //this.api_get_req("list_personalities_languages").then(response => { this.persLangArr = response })
            //this.api_get_req("list_personalities_categories").then(response => { this.persCatgArr = response })
            //this.api_get_req("list_personalities").then(response => { this.persArr = response })
            this.api_get_req("list_personalities_categories").then(response => { this.persCatgArr = response })
            this.api_get_req("list_personalities").then(response => { this.persArr = response })
            //this.api_get_req("list_languages").then(response => { this.langArr = response })
            this.api_get_req("get_config").then(response => {
                this.configFile = response
@@ -615,8 +643,23 @@ export default {
            nextTick(() => {
                feather.replace()
            })
        },
        mzl_collapsed() {
            nextTick(() => {
                feather.replace()
            })
        },
        all_collapsed(val) {
            this.collapseAll(val)
            nextTick(() => {
                feather.replace()
            })
        }
    }
}
</script>