upgraded models

ParisNeo 2023-05-14 02:29:09 +02:00
parent ec61eff14c
commit 56eab039bf
20 changed files with 108 additions and 40 deletions

.gitignore

@@ -160,12 +160,6 @@ databases/*
 extensions/
 !extensions/.keep
-# backends
-backends/
-!backends/gpt4all
-!backends/llama_cpp
-!backends/gptj
-!backends/__init__.py
 web/.env.build
 web/.env.dev
 web/.env.development

app.py

@@ -23,7 +23,7 @@ import traceback
 import sys
 from tqdm import tqdm
 from pyaipersonality import AIPersonality
-from pyGpt4All.db import DiscussionsDB, Discussion
+from gpt4all_api.db import DiscussionsDB, Discussion
 from flask import (
     Flask,
     Response,
@@ -56,8 +56,8 @@ log = logging.getLogger('werkzeug')
 log.setLevel(logging.ERROR)
 import time
-from pyGpt4All.config import load_config, save_config
-from pyGpt4All.api import GPT4AllAPI
+from gpt4all_api.config import load_config, save_config
+from gpt4all_api.api import GPT4AllAPI
 import shutil
 import markdown

@@ -9,8 +9,8 @@
 ######
 from pathlib import Path
 from typing import Callable
-from pygpt4all import GPT4All as Model
-from pyGpt4All.backend import GPTBackend
+from gpt4all import GPT4All
+from gpt4all_api.backend import GPTBackend
 import yaml

 __author__ = "parisneo"
@@ -30,13 +30,10 @@ class GPT4ALL(GPTBackend):
             config (dict): The configuration file
         """
         super().__init__(config, False)
-        self.model = Model(
-                model_path=f"./models/gpt4all/{self.config['model']}",
-                prompt_context="", prompt_prefix="", prompt_suffix="",
-                n_ctx=self.config['ctx_size'],
-                seed=self.config['seed'],
-                )
+        self.model = GPT4All.get_model_from_name(self.config['model'])
+        self.model.load_model(
+                model_path=f"./models/gpt_4all/{self.config['model']}",
+                )

     def stop_generation(self):
         self.model._grab_text_callback()
@@ -56,7 +53,6 @@ class GPT4ALL(GPTBackend):
             verbose (bool, optional): If true, the code will spit many informations about the generation process. Defaults to False.
         """
         try:
-            self.model.reset()
             for tok in self.model.generate(prompt,
                                            n_predict=n_predict,
                                            temp=self.config['temperature'],
@@ -64,7 +60,7 @@ class GPT4ALL(GPTBackend):
                                            top_p=self.config['top_p'],
                                            repeat_penalty=self.config['repeat_penalty'],
                                            repeat_last_n = self.config['repeat_last_n'],
-                                           n_threads=self.config['n_threads'],
+                                           # n_threads=self.config['n_threads'],
                                            ):
                 if not new_text_callback(tok):
                     return
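For context, a minimal standalone sketch of the loading and streaming flow this hunk migrates to. It uses only the calls visible in the diff (GPT4All.get_model_from_name, load_model, and the keyword arguments passed to generate); the model file name is a placeholder, and the exact signatures in the gpt4all package pinned by this commit may differ.

```python
# Sketch under the assumptions stated above; not the project's verbatim code.
from gpt4all import GPT4All

model_file = "gpt4all-model.bin"  # hypothetical file under ./models/gpt_4all/

model = GPT4All.get_model_from_name(model_file)
model.load_model(model_path=f"./models/gpt_4all/{model_file}")

def on_token(tok: str) -> bool:
    """Print each streamed token; returning False stops the loop below."""
    print(tok, end="", flush=True)
    return True

# The diff iterates over generate(), so under the pinned version it streams tokens.
for tok in model.generate("Hello, world",
                          n_predict=16,
                          temp=0.9,
                          top_p=0.95,
                          repeat_penalty=1.3,
                          repeat_last_n=64):
    if not on_token(tok):
        break
```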

@@ -0,0 +1,70 @@
+######
+# Project       : GPT4ALL-UI
+# File          : backend.py
+# Author        : ParisNeo with the help of the community
+# Supported by Nomic-AI
+# Licence       : Apache 2.0
+# Description   :
+# This is an interface class for GPT4All-ui backends.
+######
+from pathlib import Path
+from typing import Callable
+from pygptj.model import Model
+from gpt4all_api.backend import GPTBackend
+
+__author__ = "parisneo"
+__github__ = "https://github.com/nomic-ai/gpt4all-ui"
+__copyright__ = "Copyright 2023, "
+__license__ = "Apache 2.0"
+
+backend_name = "GptJ"
+
+class GptJ(GPTBackend):
+    file_extension = '*.bin'
+
+    def __init__(self, config: dict) -> None:
+        """Builds a GPT-J backend
+
+        Args:
+            config (dict): The configuration file
+        """
+        super().__init__(config, False)
+
+        self.model = Model(
+            model_path=f"./models/llama_cpp/{self.config['model']}",
+            prompt_context="", prompt_prefix="", prompt_suffix="",
+            n_ctx=self.config['ctx_size'],
+            seed=self.config['seed'],
+        )
+
+    def stop_generation(self):
+        self.model._grab_text_callback()
+
+    def generate(self,
+                 prompt: str,
+                 n_predict: int = 128,
+                 new_text_callback: Callable[[str], None] = bool,
+                 verbose: bool = False,
+                 **gpt_params):
+        """Generates text out of a prompt
+
+        Args:
+            prompt (str): The prompt to use for generation
+            n_predict (int, optional): Number of tokens to predict. Defaults to 128.
+            new_text_callback (Callable[[str], None], optional): A callback function that is called every time a new text element is generated. Defaults to None.
+            verbose (bool, optional): If true, the code will print verbose information about the generation process. Defaults to False.
+        """
+        try:
+            self.model.reset()
+            for tok in self.model.generate(prompt,
+                                           n_predict=n_predict,
+                                           temp=self.config['temperature'],
+                                           top_k=self.config['top_k'],
+                                           top_p=self.config['top_p'],
+                                           repeat_penalty=self.config['repeat_penalty'],
+                                           repeat_last_n=self.config['repeat_last_n'],
+                                           n_threads=self.config['n_threads'],
+                                           ):
+                if not new_text_callback(tok):
+                    return
+        except Exception as ex:
+            print(ex)
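A hypothetical driver for this new GptJ backend, showing the configuration keys the class actually reads; the model file name and parameter values are illustrative only. Note that model_path still points at ./models/llama_cpp/, apparently inherited from the llama_cpp backend this file was adapted from.

```python
# Illustrative only: config keys mirror the ones GptJ reads above.
config = {
    "model": "ggml-gpt-j.bin",   # hypothetical file name
    "ctx_size": 512,
    "seed": 0,
    "temperature": 0.9,
    "top_k": 40,
    "top_p": 0.95,
    "repeat_penalty": 1.3,
    "repeat_last_n": 64,
    "n_threads": 8,
}

backend = GptJ(config)

def on_token(tok: str) -> bool:
    print(tok, end="", flush=True)
    return True  # return False to stop generation early

backend.generate("Once upon a time", n_predict=32, new_text_callback=on_token)
```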

@@ -10,7 +10,7 @@
 from pathlib import Path
 from typing import Callable
 from gpt4allj import Model
-from pyGpt4All.backend import GPTBackend
+from gpt4all_api.backend import GPTBackend
 import yaml

 __author__ = "parisneo"

@@ -10,7 +10,7 @@
 from pathlib import Path
 from typing import Callable
 from pyllamacpp.model import Model
-from pyGpt4All.backend import GPTBackend
+from gpt4all_api.backend import GPTBackend
 import yaml

 __author__ = "parisneo"

@@ -7,7 +7,7 @@ n_threads: 8
 host: localhost
 language: en-US
 # Supported backends are llamacpp and gpt-j
-backend: llamacpp
+backend: llama_cpp
 model: null
 n_predict: 1024
 nb_messages_to_remember: 5
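The rename from llamacpp to llama_cpp is not cosmetic: as the api.py hunks below show, the backend value is joined directly onto the backends/ path, so it must match the folder name on disk exactly. A minimal sketch of that lookup, assuming a backends/llama_cpp/ folder exists:

```python
# Assumed layout: backends/llama_cpp/ on disk, matching config["backend"].
from pathlib import Path
import yaml

config = yaml.safe_load("backend: llama_cpp\nmodel: null\n")
backend_path = Path("backends") / config["backend"]
if not backend_path.exists():
    raise FileNotFoundError(f"No backend folder named {config['backend']!r}")
```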

@@ -10,7 +10,7 @@
 import gc
 import sys
 from datetime import datetime
-from pyGpt4All.db import DiscussionsDB
+from gpt4all_api.db import DiscussionsDB
 from pathlib import Path
 import importlib
 from pyaipersonality import AIPersonality
@@ -96,6 +96,7 @@ class ModelProcess:
     def _rebuild_model(self):
         try:
+            print("Rebuilding model")
             self.backend = self.load_backend(Path("backends")/self.config["backend"])
             print("Backend loaded successfully")
             try:
@@ -144,21 +145,27 @@ class ModelProcess:
         self.ready = True
         while True:
-            self._check_cancel_queue()
-            self._check_clear_queue()
-
-            command = self.generate_queue.get()
-            if command is None:
-                break
-
-            if self.cancel_queue.empty() and self.clear_queue_queue.empty():
-                self.is_generating.value = 1
-                self.started_queue.put(1)
-                self._generate(*command)
-                while not self.generation_queue.empty():
-                    time.sleep(1)
-                self.is_generating.value = 0
+            try:
+                self._check_set_config_queue()
+                self._check_cancel_queue()
+                self._check_clear_queue()
+
+                if not self.generate_queue.empty():
+                    command = self.generate_queue.get()
+                    if command is None:
+                        break
+
+                    if self.cancel_queue.empty() and self.clear_queue_queue.empty():
+                        self.is_generating.value = 1
+                        self.started_queue.put(1)
+                        self._generate(*command)
+                        while not self.generation_queue.empty():
+                            time.sleep(1)
+                        self.is_generating.value = 0
+                time.sleep(1)
+            except Exception as ex:
+                time.sleep(1)
+                print(ex)

     def _generate(self, prompt, id, n_predict):
         self.id = id
         if self.config["override_personality_model_parameters"]:
@@ -196,6 +203,7 @@ class ModelProcess:
             else:
                 # Stream the generated text to the main process
                 self.generation_queue.put((text,self.id))
+                self._check_set_config_queue()
                 self._check_cancel_queue()
                 self._check_clear_queue()
             # if stop generation is detected then stop
@@ -232,6 +240,7 @@ class ModelProcess:
     def _set_config(self, config):
         bk_cfg = self.config
         self.config = config
+        print("Changing configuration")
         # verify that the backend is the same
         if self.config["backend"]!=bk_cfg["backend"] or self.config["model"]!=bk_cfg["model"]:
             self._rebuild_model()
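The new print aside, the key point of _set_config is that the previous config is saved first, so the model is rebuilt only when the backend or model file actually changed. The same idea in isolation, with hypothetical names:

```python
def set_config(state: dict, new_config: dict, rebuild) -> None:
    """Swap configs, rebuilding only if the backend or model changed."""
    old_config = state["config"]
    state["config"] = new_config
    if (new_config["backend"] != old_config["backend"]
            or new_config["model"] != old_config["model"]):
        rebuild()  # hypothetical rebuild hook, cf. _rebuild_model above
```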

models/gpt_4all/.keep (new empty file)

@@ -7,7 +7,7 @@ markdown
 pyllamacpp==2.1.1
 gpt4all-j
 pygptj
-pygpt4all
+gpt4all
 --find-links https://download.pytorch.org/whl/cu117
 torch==2.0.0
 torchvision

@@ -6,6 +6,6 @@ pyyaml
 markdown
 pyllamacpp==2.0.0
 gpt4all-j
-pygpt4all
+gpt4all
 transformers
 pyaipersonality>=0.0.11

@@ -86,7 +86,6 @@ export default {
     handleSelection() {
       if (this.isInstalled && !this.selected) {
         this.onSelected(this);
-        this.$set(this, 'selected', true);
       }
     }
   }