Merge branch 'main' into scroll-controls

AndzejsP 2023-05-12 17:08:16 +03:00
commit 75a34ad0f4
6 changed files with 310 additions and 74 deletions

app.py

@@ -22,6 +22,7 @@ import re
import traceback
import threading
import sys
from tqdm import tqdm
from pyaipersonality import AIPersonality
from pyGpt4All.db import DiscussionsDB, Discussion
from flask import (
@@ -39,6 +40,7 @@ import gc
from geventwebsocket.handler import WebSocketHandler
from gevent.pywsgi import WSGIServer
import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
app = Flask("GPT4All-WebUI", static_url_path="/static", static_folder="static")
socketio = SocketIO(app, cors_allowed_origins="*", async_mode='gevent', ping_timeout=30, ping_interval=15)
@@ -160,12 +162,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
self.add_endpoint(
"/get_available_models", "get_available_models", self.get_available_models, methods=["GET"]
)
-        self.add_endpoint(
-            "/install_model", "install_model", self.install_model, methods=["POST"]
-        )
-        self.add_endpoint(
-            "/uninstall_model", "uninstall_model", self.uninstall_model, methods=["POST"]
-        )
self.add_endpoint(
"/extensions", "extensions", self.extensions, methods=["GET"]
@@ -212,6 +209,85 @@ class Gpt4AllWebUI(GPT4AllAPI):
@socketio.on('disconnect')
def disconnect():
print('Client disconnected')
@socketio.on('install_model')
def install_model(data):
model_path = data["path"]
progress = 0
installation_dir = Path(f'./models/{self.config["backend"]}/')
filename = Path(model_path).name
installation_path = installation_dir / filename
print("Model install requested")
print(f"Model path : {model_path}")
            if installation_path.exists():
                print("Error: Model already exists")
                data['installing'] = False
                socketio.emit('install_progress', {'status': 'failed', 'error': 'model already exists'})
                return

            socketio.emit('install_progress', {'status': 'progress', 'progress': progress})
            # Probe the remote file size up front (the threaded helper below re-checks it)
            response = requests.head(model_path)
            file_size = int(response.headers.get('Content-Length'))
            CHUNK_SIZE = 8192
            def download_chunk(url, start_byte, end_byte, fileobj, lock):
                headers = {'Range': f'bytes={start_byte}-{end_byte}'}
                response = requests.get(url, headers=headers, stream=True)
                downloaded_bytes = 0

                for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
                    if chunk:
                        # seek+write on the shared file object must not interleave across threads
                        with lock:
                            fileobj.seek(start_byte)
                            fileobj.write(chunk)
                        downloaded_bytes += len(chunk)
                        start_byte += len(chunk)

                return downloaded_bytes
            def download_file(url, file_path, num_threads=4):
                response = requests.head(url)
                file_size = int(response.headers.get('Content-Length'))
                chunk_size = file_size // num_threads
                progress = 0
                lock = threading.Lock()

                with open(file_path, 'wb') as fileobj:
                    with tqdm(total=file_size, unit='B', unit_scale=True, unit_divisor=1024) as pbar:
                        with ThreadPoolExecutor(max_workers=num_threads) as executor:
                            futures = []

                            for i in range(num_threads):
                                start_byte = i * chunk_size
                                end_byte = start_byte + chunk_size - 1 if i < num_threads - 1 else file_size - 1
                                futures.append(executor.submit(download_chunk, url, start_byte, end_byte, fileobj, lock))

                            for future in tqdm(as_completed(futures), total=num_threads):
                                downloaded_bytes = future.result()
                                progress += downloaded_bytes
                                pbar.update(downloaded_bytes)
                                # progress is cumulative bytes; events are coarse, one per completed worker
                                socketio.emit('install_progress', {'status': 'progress', 'progress': progress})
            # Kick off the threaded download, then report completion
            download_file(model_path, installation_path, num_threads=4)
            socketio.emit('install_progress', {'status': 'succeeded', 'error': ''})
@socketio.on('uninstall_model')
def uninstall_model(data):
model_path = data['path']
installation_dir = Path(f'./models/{self.config["backend"]}/')
filename = Path(model_path).name
installation_path = installation_dir / filename
            if not installation_path.exists():
                socketio.emit('install_progress', {'status': 'failed', 'error': 'model not found'})
                return

            installation_path.unlink()
            socketio.emit('install_progress', {'status': 'succeeded', 'error': ''})
@socketio.on('generate_msg')
def generate_msg(data):
@@ -702,7 +778,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
def get_available_models(self):
-        response = requests.get(f' https://gpt4all.io/models/models.json')
response = requests.get(f'https://gpt4all.io/models/models.json')
model_list = response.json()
models = []
@@ -710,7 +786,8 @@ class Gpt4AllWebUI(GPT4AllAPI):
filename = model['filename']
filesize = model['filesize']
path = f'https://gpt4all.io/models/{filename}'
-            is_installed = Path(f'/models/{self.config["backend"]}/{filename}').is_file()
local_path = Path(f'./models/{self.config["backend"]}/{filename}')
is_installed = local_path.exists()
models.append({
'title': model['filename'],
'icon': '/icons/default.png', # Replace with the path to the model icon
@@ -721,31 +798,6 @@
})
return jsonify(models)
-    def install_model(self):
-        model_path = request.json.get('path')
-        installation_dir = Path('/models/llamacpp/')
-        filename = Path(model_path).name
-        installation_path = installation_dir / filename
-        if installation_path.exists():
-            return jsonify({'status': 'Already installed'})
-        response = requests.get(model_path)
-        with open(installation_path, 'wb') as f:
-            f.write(response.content)
-        return jsonify({'status':True})
-    def uninstall_model(self):
-        model_path = request.json.get('path')
-        installation_dir = Path('/models/llamacpp/')
-        filename = Path(model_path).name
-        installation_path = installation_dir / filename
-        if not installation_path.exists():
-            return jsonify({'status':False})
-        installation_path.unlink()
-        return jsonify({'status':True})
def get_config(self):
return jsonify(self.config)
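
One caveat on the threaded installer above: splitting the file into byte ranges only works if the model host honors HTTP Range requests; a server that ignores the Range header would send the full body to every worker and corrupt the file. A defensive variant could probe for range support and fall back to a single sequential stream. A minimal sketch, assuming only the requests library already imported in app.py (supports_ranges and download_single_stream are hypothetical helpers, not part of this commit):

import requests

def supports_ranges(url: str) -> bool:
    # Servers that accept partial-content requests advertise "Accept-Ranges: bytes"
    head = requests.head(url, allow_redirects=True)
    return head.headers.get('Accept-Ranges', '').lower() == 'bytes'

def download_single_stream(url: str, file_path, chunk_size: int = 8192):
    # Fallback path: one sequential streamed GET, no Range headers required
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        with open(file_path, 'wb') as fileobj:
            for chunk in response.iter_content(chunk_size=chunk_size):
                if chunk:
                    fileobj.write(chunk)

# Hypothetical wiring inside the install_model handler:
# if supports_ranges(model_path):
#     download_file(model_path, installation_path, num_threads=4)
# else:
#     download_single_stream(model_path, installation_path)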


@@ -17,12 +17,12 @@ __github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
-backend_name = "LLAMACPP"
backend_name = "GPT4ALL"
-class LLAMACPP(GPTBackend):
class GPT4ALL(GPTBackend):
file_extension='*.bin'
def __init__(self, config:dict) -> None:
"""Builds a LLAMACPP backend
"""Builds a GPT4ALL backend
Args:
config (dict): The configuration file
@@ -30,7 +30,7 @@ class GPT4ALL(GPTBackend):
super().__init__(config, False)
self.model = Model(
model_path=f"./models/llama_cpp/{self.config['model']}",
model_path=f"./models/gpt4all/{self.config['model']}",
prompt_context="", prompt_prefix="", prompt_suffix="",
n_ctx=self.config['ctx_size'],
seed=self.config['seed'],


@@ -59,7 +59,7 @@ class GPT4AllAPI():
self.chatbot_bindings = self.create_chatbot()
print("Chatbot created successfully")
-        except Exception:
except Exception as ex:
self.config["backend"] = "gpt4all"
self.backend = self.load_backend(self.BACKENDS_LIST[self.config["backend"]])
self.config["model"] = None


@@ -1,8 +1,5 @@
<template>
-  <div
-    class="flex items-center p-4 hover:bg-primary-light rounded-lg mb-2 shadow-lg"
-    :class="{ 'bg-primary-light': selected }"
-  >
<div class="flex items-center p-4 hover:bg-primary-light rounded-lg mb-2 shadow-lg">
<div class="flex-shrink-0">
<i :class="`fas ${icon} text-xl`"></i>
</div>
@@ -22,15 +19,31 @@
<button
class="px-4 py-2 rounded-md text-white font-bold transition-colors duration-300"
:class="[isInstalled ? 'bg-red-500 hover:bg-red-600' : 'bg-green-500 hover:bg-green-600']"
:disabled="installing || uninstalling"
@click="toggleInstall"
>
-        {{ isInstalled ? 'Uninstall' : 'Install' }}
<template v-if="installing">
<div class="flex items-center space-x-2">
<div class="h-2 w-20 bg-gray-300 rounded"></div>
<span>Installing...</span>
</div>
</template>
<template v-else-if="uninstalling">
<div class="flex items-center space-x-2">
<div class="h-2 w-20 bg-gray-300 rounded"></div>
<span>Uninstalling...</span>
</div>
</template>
<template v-else>
{{ isInstalled ? 'Uninstall' : 'Install' }}
</template>
</button>
</div>
</div>
</template>
<script>
import { socket, state } from '@/services/websocket.js'
export default {
props: {
title: String,
@@ -38,16 +51,33 @@ export default {
path: String,
description: String,
isInstalled: Boolean,
-    onToggleInstall: Function,
-    selected: Boolean // Use a boolean selected prop
onInstall: Function,
onUninstall: Function,
onSelected: Function,
selected: Boolean
},
data() {
return {
installing: false,
uninstalling: false
};
},
methods: {
    toggleInstall() {
-      this.onToggleInstall(this.isInstalled, this.path);
      if (this.isInstalled) {
        this.uninstalling = true;
        // Delegate the actual work to the parent component's callbacks
        this.onUninstall(this);
      } else {
        this.installing = true;
        this.onInstall(this);
      }
    },
handleSelection() {
-      if (this.isInstalled && !this.selected) { // Only emit event if installed and not already selected
-        this.$emit('update:selected', true);
      if (this.isInstalled && !this.selected) {
        this.selected = true;
        this.onSelected(this);
      }
}
}


@@ -52,17 +52,21 @@
</select>
</div>
<div class="m-2">
<label for="model" class="block mb-2 text-sm font-medium text-gray-900 dark:text-white">
Model:
</label>
<select id="model"
class="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500">
</div>
</div>
<div
class="flex flex-col mb-2 p-3 rounded-lg bg-bg-light-tone dark:bg-bg-dark-tone hover:bg-bg-light-tone-panel hover:dark:bg-bg-dark-tone-panel duration-150 shadow-lg">
<div class="flex flex-row ">
<button @click.stop="bec_collapsed = !bec_collapsed"
class="text-2xl hover:text-primary duration-75 p-2 -m-2 w-full text-left active:translate-y-1">
<!-- <i data-feather="chevron-right"></i> -->
<option v-for="item in modelsArr" :selected="item === configFile.model">{{ item }}</option>
</select>
</div>
<h3 class="text-lg font-semibold cursor-pointer select-none "
@click.stop="bec_collapsed = !bec_collapsed">
Models zoo</h3>
</button>
</div>
<div :class="{ 'hidden': bec_collapsed }" class="flex flex-col mb-2 p-2">
<div v-if="models.length > 0" class="my-2">
<label for="model" class="block ml-2 mb-2 text-sm font-medium text-gray-900 dark:text-white">
Install more models:
@@ -70,12 +74,13 @@
<div class="overflow-y-auto max-h-96 no-scrollbar p-2">
<model-entry v-for="(model, index) in models" :key="index" :title="model.title" :icon="model.icon"
:path="model.path" :description="model.description" :is-installed="model.isInstalled"
-              :on-toggle-install="toggleInstall" />
:on-install="onInstall"
:on-uninstall="onUninstall"
:on-selected="onSelected" />
</div>
</div>
</div>
</div>
<!-- PERSONALITY -->
<div
class="flex flex-col mb-2 p-3 rounded-lg bg-bg-light-tone dark:bg-bg-dark-tone hover:bg-bg-light-tone-panel hover:dark:bg-bg-dark-tone-panel duration-150 shadow-lg">
@@ -292,6 +297,7 @@ import { nextTick } from 'vue'
import MessageBox from "@/components/MessageBox.vue";
import YesNoDialog from "@/components/YesNoDialog.vue";
import ModelEntry from '@/components/ModelEntry.vue';
import { socket, state } from '@/services/websocket.js'
axios.defaults.baseURL = import.meta.env.VITE_GPT4ALL_API_BASEURL
export default {
components: {
@@ -309,6 +315,8 @@ export default {
data() {
return {
// Websocket
socket: socket,
// Models zoo installer stuff
models: [],
// Accordeon stuff
@@ -317,7 +325,6 @@ export default {
mc_collapsed: false,
// Settings stuff
backendsArr: [],
-      modelsArr: [],
persLangArr: [],
persCatgArr: [],
persArr: [],
@@ -339,19 +346,67 @@ export default {
console.log(error);
});
},
    onSelected(model_object) {
      console.log("Selected model")
      this.update_setting('model', model_object.title)
    },
    // Model installation (the REST-based toggleInstall below was removed in favor of socket events)
-    toggleInstall(isInstalled, path) {
-      const endpoint = isInstalled ? '/uninstall_model' : '/install_model';
-      axios.post(endpoint, { path })
-        .then((response) => {
-          console.log(response.data.status);
-          // Update the isInstalled property of the corresponding model
-          const index = this.models.findIndex((model) => model.path === path);
-          this.$set(this.models[index], 'isInstalled', isInstalled);
-        })
-        .catch((error) => {
-          console.error(error);
-        });
-    },
    onInstall(model_object) {
      let isInstalled = model_object.isInstalled
      let path = model_object.path
      this.showProgress = true;
      this.progress = 0;
      console.log("installing...")
      const progressListener = (response) => {
        if (response.status === 'progress') {
          this.progress = response.progress;
        } else if (response.status === 'succeeded') {
          // Installation completed
          model_object.installing = false;
          this.showProgress = false;
          // Update the isInstalled property of the corresponding model
          const index = this.models.findIndex((model) => model.path === path);
          this.models[index].isInstalled = true;
          this.socket.off('install_progress', progressListener);
        } else if (response.status === 'failed') {
          // Installation failed or encountered an error
          model_object.installing = false;
          this.showProgress = false;
          this.socket.off('install_progress', progressListener);
          console.error('Installation failed:', response.error);
        }
      };
      this.socket.on('install_progress', progressListener);
      this.socket.emit('install_model', { path: path });
    },
    onUninstall(model_object) {
      console.log("uninstalling model...")
      const progressListener = (response) => {
        if (response.status === 'progress') {
          this.progress = response.progress;
        } else if (response.status === 'succeeded') {
          // Uninstallation completed
          model_object.uninstalling = false;
          this.showProgress = false;
          model_object.isInstalled = false;
          this.socket.off('install_progress', progressListener);
        } else if (response.status === 'failed') {
          // Uninstallation failed or encountered an error
          model_object.uninstalling = false;
          this.showProgress = false;
          this.socket.off('install_progress', progressListener);
          console.error('Uninstallation failed:', response.error);
        }
      };
      this.socket.on('install_progress', progressListener);
      this.socket.emit('uninstall_model', { path: model_object.path });
    },
// messagebox ok stuff
onMessageBoxOk() {
@@ -368,7 +423,19 @@ export default {
//this.api_get_req("list_personalities_categories").then(response => { this.persCatgArr = response })
//this.api_get_req("list_personalities").then(response => { this.persArr = response })
//this.api_get_req("list_languages").then(response => { this.langArr = response })
-      this.api_get_req("get_config").then(response => { this.configFile = response })
      this.api_get_req("get_config").then(response => {
        this.configFile = response
        console.log("selecting model")
        this.models.forEach(model => {
          console.log(`${model.title} -> ${response["model"]}`)
          if (model.title == response["model"]) {
            model.selected = true;
          } else {
            model.selected = false;
          }
        });
      })
},
// Accordeon stuff
toggleAccordion() {
@@ -467,7 +534,6 @@ export default {
this.configFile = await this.api_get_req("get_config")
this.backendsArr = await this.api_get_req("list_backends")
-    this.modelsArr = await this.api_get_req("list_models")
this.persLangArr = await this.api_get_req("list_personalities_languages")
this.persCatgArr = await this.api_get_req("list_personalities_categories")
this.persArr = await this.api_get_req("list_personalities")
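
Since the onInstall/onUninstall methods above drive the backend purely through 'install_model'/'uninstall_model' socket events, the same flow can be exercised headlessly for testing. A minimal sketch using the python-socketio client package; the server URL is a placeholder for wherever this WebUI is listening, and the model URL follows the manifest below:

import socketio

sio = socketio.Client()

@sio.on('install_progress')
def on_progress(data):
    # The server emits 'progress' updates, then a final 'succeeded' or 'failed'
    print(data['status'], data.get('progress', ''), data.get('error', ''))

sio.connect('http://localhost:9600')  # placeholder address
sio.emit('install_model', {'path': 'https://gpt4all.io/models/ggml-gpt4all-j.bin'})
sio.wait()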


@@ -0,0 +1,88 @@
[
{
"md5sum": "81a09a0ddf89690372fc296ff7f625af",
"filename": "ggml-gpt4all-j-v1.3-groovy.bin",
"filesize": "3785248281",
"isDefault": "true",
"bestGPTJ": "true",
"description": "Current best commercially licensable model based on GPT-J and trained by Nomic AI on the latest curated GPT4All dataset."
},
{
"md5sum": "91f886b68fbce697e9a3cd501951e455",
"filename": "ggml-gpt4all-l13b-snoozy.bin",
"filesize": "8136770688",
"bestLlama": "true",
"description": "Current best non-commercially licensable model based on Llama 13b and trained by Nomic AI on the latest curated GPT4All dataset."
},
{
"md5sum": "756249d3d6abe23bde3b1ae272628640",
"filename": "ggml-mpt-7b-chat.bin",
"filesize": "4854401050",
"isDefault": "true",
"bestMPT": "true",
"requires": "2.4.1",
"description": "Current best non-commercially licensable chat model based on MPT and trained by Mosaic ML."
},
{
"md5sum": "879344aaa9d62fdccbda0be7a09e7976",
"filename": "ggml-gpt4all-j-v1.2-jazzy.bin",
"filesize": "3785248281",
"description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v2 GPT4All dataset."
},
{
"md5sum": "61d48a82cb188cceb14ebb8082bfec37",
"filename": "ggml-gpt4all-j-v1.1-breezy.bin",
"filesize": "3785248281",
"description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v1 GPT4All dataset."
},
{
"md5sum": "5b5a3f9b858d33b29b52b89692415595",
"filename": "ggml-gpt4all-j.bin",
"filesize": "3785248281",
"description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v0 GPT4All dataset."
},
{
"md5sum": "29119f8fa11712704c6b22ac5ab792ea",
"filename": "ggml-vicuna-7b-1.1-q4_2.bin",
"filesize": "4212859520",
"description": "A non-commercially licensable model based on Llama 7b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego."
},
{
"md5sum": "95999b7b0699e2070af63bf5d34101a8",
"filename": "ggml-vicuna-13b-1.1-q4_2.bin",
"filesize": "8136770688",
"description": "A non-commercially licensable model based on Llama 13b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego."
},
{
"md5sum": "99e6d129745a3f1fb1121abed747b05a",
"filename": "ggml-wizardLM-7B.q4_2.bin",
"filesize": "4212864640",
"description": "A non-commercially licensable model based on Llama 7b and trained by Microsoft and Peking University."
},
{
"md5sum": "6cb4ee297537c9133bddab9692879de0",
"filename": "ggml-stable-vicuna-13B.q4_2.bin",
"filesize": "8136777088",
"description": "A non-commercially licensable model based on Llama 13b and RLHF trained by Stable AI."
},
{
"md5sum": "120c32a51d020066288df045ef5d52b9",
"filename": "ggml-mpt-7b-base.bin",
"filesize": "4854401028",
"requires": "2.4.1",
"description": "A commercially licensable model base pre-trained by Mosaic ML."
},
{
"md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe",
"filename": "ggml-nous-gpt4-vicuna-13b.bin",
"filesize": "8136777088",
"description": "A non-commercially licensable model based on Vicuna 13b, fine-tuned on ~180,000 instructions, trained by Nous Research."
},
{
"md5sum": "1cfa4958f489f0a0d1ffdf6b37322809",
"filename": "ggml-mpt-7b-instruct.bin",
"filesize": "4854401028",
"requires": "2.4.1",
"description": "A commericially licensable instruct model based on MPT and trained by Mosaic ML."
}
]
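
Each entry above carries an md5sum that the new installer does not yet verify; checking it after download would catch truncated or corrupted files. A minimal sketch with hashlib, where verify_md5 is an illustrative helper, not part of this commit, and the path assumes the ./models/<backend>/ layout used in app.py:

import hashlib
from pathlib import Path

def verify_md5(file_path: Path, expected_md5: str, chunk_size: int = 8192) -> bool:
    # Stream the file through md5 so multi-GB models never need to fit in memory
    digest = hashlib.md5()
    with open(file_path, 'rb') as fileobj:
        for chunk in iter(lambda: fileobj.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest() == expected_md5

# Example against the first manifest entry above
ok = verify_md5(Path('./models/gpt4all/ggml-gpt4all-j-v1.3-groovy.bin'),
                '81a09a0ddf89690372fc296ff7f625af')
print('checksum ok' if ok else 'checksum mismatch')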