Multiple bug fixes and upgrades

This commit is contained in:
ParisNeo 2023-04-23 16:59:00 +02:00
parent 6a38876ab9
commit f6be8d9dec
10 changed files with 101630 additions and 25 deletions

View File

@@ -11,9 +11,9 @@ COPY ./pyGpt4All/api.py /srv/pyGpt4All/api.py
COPY ./pyGpt4All/db.py /srv/pyGpt4All/db.py
COPY ./pyGpt4All/config.py /srv/pyGpt4All/config.py
COPY ./pyGpt4All/extension.py /srv/pyGpt4All/extension.py
COPY ./pyGpt4All/backends/backend.py /srv/backends/backend.py
COPY ./pyGpt4All/backends/llama_cpp.py /srv/backends/llama_cpp.py
COPY ./pyGpt4All/backends/gpt_j.py /srv/backends/gpt_j.py
COPY ./pyGpt4All/backends/backend.py /srv/pyGpt4All/backends/backend.py
COPY ./pyGpt4All/backends/llama_cpp.py /srv/pyGpt4All/backends/llama_cpp.py
COPY ./pyGpt4All/backends/gpt_j.py /srv/pyGpt4All/backends/gpt_j.py
COPY ./static /srv/static
COPY ./templates /srv/templates

7
app.py
View File

@@ -149,7 +149,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
def list_models(self):
models_dir = Path('./models')/self.config["backend"] # replace with the actual path to the models folder
models = [f.name for f in models_dir.glob('*.bin')]
models = [f.name for f in models_dir.glob(self.chatbot_bindings.file_extension)]
return jsonify(models)
@@ -389,13 +389,14 @@ class Gpt4AllWebUI(GPT4AllAPI):
self.config['backend'] = backend
models_dir = Path('./models')/self.config["backend"] # replace with the actual path to the models folder
models = [f.name for f in models_dir.glob('*.bin')]
models = [f.name for f in models_dir.glob(self.chatbot_bindings.file_extension)]
if len(models)>0:
self.config['model'] = models[0]
self.create_chatbot()
return jsonify({"status": "ok"})
return jsonify({"status": "error"})
def update_model_params(self):
data = request.get_json()
backend = str(data["backend"])

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,9 @@
from pyGpt4All.backends.llama_cpp import LLAMACPP
from pyGpt4All.backends.gpt_j import GPT_J
from pyGpt4All.backends.transformers import Transformers
BACKENDS_LIST={
"llama_cpp":LLAMACPP,
"gpt_j":GPT_J
"gpt_j":GPT_J,
"transformers":Transformers
}

View File

@@ -17,8 +17,10 @@ __license__ = "Apache 2.0"
class GPTBackend:
file_extension='*.bin'
def __init__(self, config:dict) -> None:
self.config = config
def generate(self,
prompt:str,
n_predict: int = 128,

View File

@@ -19,6 +19,7 @@ __license__ = "Apache 2.0"
class GPT_J(GPTBackend):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a GPT-J backend

View File

@@ -19,6 +19,7 @@ __license__ = "Apache 2.0"
class LLAMACPP(GPTBackend):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a LLAMACPP backend

View File

@@ -20,6 +20,7 @@ __license__ = "Apache 2.0"
class Transformers(GPTBackend):
file_extension='*'
def __init__(self, config:dict) -> None:
"""Builds a GPT-J backend
@@ -28,7 +29,7 @@ class Transformers(GPTBackend):
"""
super().__init__(config)
self.config = config
self.tokenizer = tokenizer = AutoTokenizer.from_pretrained(f"./models/transformers/{self.config['model']}/tokenizer.model", local_files_only=True)
self.tokenizer = tokenizer = AutoTokenizer.from_pretrained(f"./models/transformers/{self.config['model']}/tokenizer.json", local_files_only=True)
self.model = AutoModelForCausalLM.from_pretrained(f"./models/transformers/{self.config['model']}/model.bin", local_files_only=True)

View File

@@ -5,3 +5,4 @@ pyyaml
markdown
pyllamacpp==1.0.6
gpt4all-j
transformers

View File

@@ -170,6 +170,7 @@ function populate_settings(){
const selectPersonality = document.getElementById('personalities');
function populate_backends(){
selectBackend.innerHTML = "";
// Fetch the list of .bin files from the models subfolder
fetch('/list_backends')
.then(response => response.json())
@@ -265,23 +266,25 @@ function populate_settings(){
});
}
function populate_personalities(){
selectPersonality.innerHTML=""
// Fetch the list of .yaml files from the models subfolder
fetch('/list_personalities')
.then(response => response.json())
.then(data => {
if (Array.isArray(data)) {
// data is an array
data.forEach(filename => {
const optionElement = document.createElement('option');
optionElement.value = filename;
optionElement.textContent = filename;
selectPersonality.appendChild(optionElement);
});
} else {
console.error('Expected an array, but received:', data);
}
});
const selectPersonalityLanguage = document.getElementById('personalities_language');
selectPersonality.innerHTML=""
// Fetch the list of .yaml files from the models subfolder
fetch('/list_personalities')
.then(response => response.json())
.then(data => {
if (Array.isArray(data)) {
// data is an array
data.forEach(filename => {
const optionElement = document.createElement('option');
optionElement.value = filename;
optionElement.textContent = filename;
selectPersonality.appendChild(optionElement);
});
} else {
console.error('Expected an array, but received:', data);
}
});
}
function set_personality_language(lang, callback) {