Merge branch 'nomic-ai:main' into main

commit 31eeef6ff6
tkocou, 2023-05-14 11:18:13 -04:00, committed by GitHub
10 changed files with 155 additions and 175 deletions


@@ -19,6 +19,7 @@ import threading
import time
import requests
import urllib.request
+from tqdm import tqdm
__author__ = "parisneo"
__github__ = "https://github.com/nomic-ai/gpt4all-ui"
@@ -38,6 +39,7 @@ class ModelProcess:
        self.started_queue = mp.Queue()
        self.process = None
        self.is_generating = mp.Value('i', 0)
+        self.model_ready = mp.Value('i', 0)
        self.ready = False
    def load_backend(self, backend_path):
@@ -103,6 +105,7 @@ class ModelProcess:
            model_file = Path("models")/self.config["backend"]/self.config["model"]
            print(f"Loading model : {model_file}")
            self.model = self.backend(self.config)
+            self.model_ready.value = 1
            print("Model created successfully")
        except Exception as ex:
            print("Couldn't build model")
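Because the loader runs in a separate process, model_ready is a multiprocessing.Value rather than a plain attribute: the child that builds the model and the parent that polls it share one integer. A minimal, self-contained sketch of the pattern (the worker/main split here is illustrative, not code from the commit):

import multiprocessing as mp
import time

def worker(model_ready):
    # Stand-in for the slow model build; flip the shared flag when done.
    time.sleep(1)
    model_ready.value = 1

if __name__ == "__main__":
    model_ready = mp.Value('i', 0)   # 'i' = C int, 0 = not ready
    p = mp.Process(target=worker, args=(model_ready,))
    p.start()
    while model_ready.value == 0:    # the parent polls the same shared int
        time.sleep(0.1)
    print("model is ready")
    p.join()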
@@ -138,12 +141,15 @@
    def _run(self):
        self._rebuild_model()
        self._rebuild_personality()
-        self._generate("I",0,1)
-        print()
-        print("Ready to receive data")
-        print(f"Listening on :http://{self.config['host']}:{self.config['port']}")
+        if self.model_ready.value == 1:
+            self._generate("I",0,1)
+            print()
+            print("Ready to receive data")
+        else:
+            print("No model loaded. Waiting for new configuration instructions")
        self.ready = True
+        print(f"Listening on :http://{self.config['host']}:{self.config['port']}")
        while True:
            try:
                self._check_set_config_queue()
@@ -312,7 +318,6 @@ class GPT4AllAPI():
            if installation_path.exists():
                print("Error: Model already exists")
                data.installing = False
-                socketio.emit('install_progress',{'status': 'failed', 'error': 'model already exists'})
            socketio.emit('install_progress',{'status': 'progress', 'progress': progress})
@@ -342,20 +347,33 @@
        @socketio.on('generate_msg')
        def generate_msg(data):
-            if self.current_discussion is None:
-                if self.db.does_last_discussion_have_messages():
-                    self.current_discussion = self.db.create_discussion()
-                else:
-                    self.current_discussion = self.db.load_last_discussion()
-            message = data["prompt"]
-            message_id = self.current_discussion.add_message(
-                "user", message, parent=self.message_id
-            )
-            self.current_user_message_id = message_id
-            tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))
-            tpe.start()
+            if self.process.model_ready.value==1:
+                if self.current_discussion is None:
+                    if self.db.does_last_discussion_have_messages():
+                        self.current_discussion = self.db.create_discussion()
+                    else:
+                        self.current_discussion = self.db.load_last_discussion()
+                message = data["prompt"]
+                message_id = self.current_discussion.add_message(
+                    "user", message, parent=self.message_id
+                )
+                self.current_user_message_id = message_id
+                tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))
+                tpe.start()
+            else:
+                self.socketio.emit('infos',
+                        {
+                            "status":'model_not_ready',
+                            "type": "input_message_infos",
+                            "bot": self.personality.name,
+                            "user": self.personality.user_name,
+                            "message":"",
+                            "user_message_id": self.current_user_message_id,
+                            "ai_message_id": self.current_ai_message_id,
+                        }
+                    )
@socketio.on('generate_msg_from')
def handle_connection(data):
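The 'infos' payload now carries a status field, 'generation_started' or 'model_not_ready', that clients branch on. A minimal sketch of a client exercising this with the python-socketio package (the host and port are assumptions; the server reads them from config['host'] and config['port']):

import socketio  # pip install "python-socketio[client]"

sio = socketio.Client()

@sio.on('infos')
def on_infos(msg):
    # Branch on the status field introduced in this commit.
    if msg.get('status') == 'model_not_ready':
        print("No model loaded; install a model before prompting.")
    else:
        print(f"Generation started, ai_message_id={msg.get('ai_message_id')}")

sio.connect('http://localhost:9600')  # assumed host:port
sio.emit('generate_msg', {'prompt': 'Hello'})
sio.wait()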
@@ -390,23 +408,42 @@ class GPT4AllAPI():
    def download_file(self, url, installation_path, callback=None):
        """
-        Downloads a file from a URL and displays the download progress using tqdm.
+        Downloads a file from a URL, reports the download progress using a callback function, and displays a progress bar.
        Args:
            url (str): The URL of the file to download.
            installation_path (str): The path where the file should be saved.
            callback (function, optional): A callback function to be called during the download
                with the progress percentage as an argument. Defaults to None.
        """
-        def report_hook(count, block_size, total_size):
-            if callback is not None:
-                percentage = (count * block_size / total_size) * 100
-                callback(percentage)
-        urllib.request.urlretrieve(url, installation_path, reporthook=report_hook)
-        if callback is not None:
-            callback(100.0)
+        try:
+            response = requests.get(url, stream=True)
+            # Get the file size from the response headers
+            total_size = int(response.headers.get('content-length', 0))
+            with open(installation_path, 'wb') as file:
+                downloaded_size = 0
+                with tqdm(total=total_size, unit='B', unit_scale=True, ncols=80) as progress_bar:
+                    for chunk in response.iter_content(chunk_size=8192):
+                        if chunk:
+                            file.write(chunk)
+                            downloaded_size += len(chunk)
+                            if callback is not None:
+                                percentage = (downloaded_size / total_size) * 100
+                                callback(percentage)
+                            progress_bar.update(len(chunk))
+            if callback is not None:
+                callback(100.0)
+            print("File downloaded successfully")
+        except Exception as e:
+            print("Couldn't download file:", str(e))

    def load_backend(self, backend_path):
        # define the full absolute path to the module
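The callback contract is the only coupling between download_file and its callers: it receives a percentage in [0, 100]. A runnable stand-in for the streaming loop, no network required (stream_with_progress is a hypothetical name, not part of the codebase):

def stream_with_progress(chunks, total_size, callback=None):
    # Same percentage math as the download loop above, fed with fake chunks.
    downloaded_size = 0
    for chunk in chunks:
        downloaded_size += len(chunk)
        if callback is not None:
            callback((downloaded_size / total_size) * 100)

data = [b"x" * 100] * 5
stream_with_progress(data, total_size=500, callback=lambda p: print(f"{p:.0f}%"))
# prints 20% 40% 60% 80% 100%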
@@ -544,6 +581,7 @@ class GPT4AllAPI():
        ) # first the content is empty, but we'll fill it at the end
        self.socketio.emit('infos',
                {
+                    "status":'generation_started',
                    "type": "input_message_infos",
                    "bot": self.personality.name,
                    "user": self.personality.user_name,


@@ -22,14 +22,24 @@ socket.on('disconnect', function() {
    console.log("Disconnected")
});
socket.on('infos', function(msg) {
-    if(globals.user_msg){
-        globals.user_msg.setSender(msg.user);
-        globals.user_msg.setMessage(msg.message);
-        globals.user_msg.setID(msg.id);
-    }
-    globals.bot_msg.setSender(msg.bot);
-    globals.bot_msg.setID(msg.ai_message_id);
-    globals.bot_msg.messageTextElement.innerHTML = `Generating answer. Please stand by...`;
+    console.log(msg)
+    if(msg["status"]=="generation_started"){
+        if(globals.user_msg){
+            globals.user_msg.setSender(msg.user);
+            globals.user_msg.setMessage(msg.message);
+            globals.user_msg.setID(msg.id);
+        }
+        globals.bot_msg.setSender(msg.bot);
+        globals.bot_msg.setID(msg.ai_message_id);
+        globals.bot_msg.messageTextElement.innerHTML = `Generating answer. Please stand by...`;
+    }
+    else{
+        globals.sendbtn.style.display="block";
+        globals.waitAnimation.style.display="none";
+        globals.stopGeneration.style.display = "none";
+        globals.is_generating = false
+        alert("It seems that no model has been loaded. Please download and install a model first, then try again.");
+    }
});
socket.on('waiter', function(msg) {

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

web/dist/index.html (vendored)

@@ -6,8 +6,8 @@
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>GPT4All - WEBUI</title>
-    <script type="module" crossorigin src="/assets/index-afd2c992.js"></script>
-    <link rel="stylesheet" href="/assets/index-98afc6c1.css">
+    <script type="module" crossorigin src="/assets/index-8b53a73b.js"></script>
+    <link rel="stylesheet" href="/assets/index-94947680.css">
</head>
<body>
<div id="app"></div>


@@ -26,9 +26,9 @@
            <template v-if="installing">
                <div class="flex items-center space-x-2">
                    <div class="h-2 w-20 bg-gray-300 rounded">
-                        <div :style="{ width: progress + '%' }" class="h-full bg-green-500"></div>
+                        <div :style="{ width: progress + '%'}" class="h-full bg-red-500 rounded"></div>
                    </div>
-                    <span>Installing...</span>
+                    <span>Installing...{{ Math.floor(progress) }}%</span>
                </div>
            </template>
<template v-else-if="uninstalling">
@@ -51,10 +51,6 @@
import socket from '@/services/websocket.js'
export default {
    props: {
-        progress: {
-            type: Number,
-            default: 0
-        },
        title: String,
        icon: String,
        path: String,
@@ -67,6 +63,7 @@ export default {
    },
    data() {
        return {
+            progress: 0,
            installing: false,
            uninstalling: false
        };


@@ -5,6 +5,16 @@
        role="alert">
        <div class="flex flex-row">
            <slot>
+                <div v-if="success"
+                    class="inline-flex items-center justify-center flex-shrink-0 w-8 h-8 text-green-500 bg-green-100 rounded-lg dark:bg-green-800 dark:text-green-200">
+                    <i data-feather="check"></i>
+                    <span class="sr-only">Check icon</span>
+                </div>
+                <div v-if="!success" class="inline-flex items-center justify-center flex-shrink-0 w-8 h-8 text-red-500 bg-red-100 rounded-lg dark:bg-red-800 dark:text-red-200">
+                    <i data-feather="x"></i>
+                    <span class="sr-only">Cross icon</span>
+                </div>
                <div class="ml-3 text-sm font-normal">{{ message }}</div>
            </slot>
        </div>
@@ -19,6 +29,7 @@
                clip-rule="evenodd"></path>
            </svg>
        </button>
+        </div>
    </div>
</template>
@@ -34,12 +45,23 @@ export default {
    data() {
        return {
            show: false,
+            success: true,
            message: ''
        };
    },
    methods: {
        close() {
            this.$emit('close')
            this.show = false
        },
+        showToast(message, duration_s=3, success= true){
+            this.success = success;
+            this.message = message;
+            this.show = true;
+            setTimeout(() => {
+                this.$emit('close')
+                this.show = false
+            }, duration_s*1000);
+        }
    },
    watch: {


@@ -131,13 +131,7 @@
            </div>
        </div>
-        <Toast :showProp="isCopiedToClipboard" @close="isCopiedToClipboard = false">
-            <div
-                class="inline-flex items-center justify-center flex-shrink-0 w-8 h-8 text-green-500 bg-green-100 rounded-lg dark:bg-green-800 dark:text-green-200">
-                <i data-feather="check"></i>
-                <span class="sr-only">Check icon</span>
-            </div>
-            <div class="ml-3 text-sm font-normal">Message content copied to clipboard!</div>
+        <Toast ref="toast">
        </Toast>
</template>
@@ -166,7 +160,7 @@ export default {
            isSelectAll: false,
            showConfirmation: false,
            chime: new Audio("chime_aud.wav"),
-            isCopiedToClipboard: false,
+            showToast: false,
            isSearch: false,
            isDiscussionBottom: false,
        }
@@ -427,40 +421,48 @@ export default {
                // Update previous message with response user data
                //
                // msgObj
                //
+                // "status": "if the model is not ready, this informs the user that they can't prompt the model"
                // "type": "input_message_infos",
                // "bot": self.personality.name,
                // "user": self.personality.user_name,
                // "message":message,#markdown.markdown(message),
                // "user_message_id": self.current_user_message_id,
                // "ai_message_id": self.current_ai_message_id,
-                this.updateLastUserMsg(msgObj)
-                // Create response message
-                let responseMessage = {
-                    content: "✍ please stand by ...",//msgObj.message,
-                    id: msgObj.ai_message_id,
-                    parent: msgObj.user_message_id,
-                    rank: 0,
-                    sender: msgObj.bot,
-                    //type: msgObj.type
-                }
-                this.discussionArr.push(responseMessage)
-                nextTick(() => {
-                    const msgList = document.getElementById('messages-list')
-                    this.scrollBottom(msgList)
-                })
-                if (this.currentDiscussion.title === '' || this.currentDiscussion.title === null) {
-                    if (msgObj.type == "input_message_infos") {
-                        // This is a user input
-                        this.changeTitleUsingUserMSG(this.currentDiscussion.id, msgObj.message)
-                    }
-                }
-                console.log("infos", msgObj)
+                console.log(msgObj);
+                if(msgObj["status"]=="generation_started"){
+                    this.updateLastUserMsg(msgObj)
+                    // Create response message
+                    let responseMessage = {
+                        content: "✍ please stand by ...",//msgObj.message,
+                        id: msgObj.ai_message_id,
+                        parent: msgObj.user_message_id,
+                        rank: 0,
+                        sender: msgObj.bot,
+                        //type: msgObj.type
+                    }
+                    this.discussionArr.push(responseMessage)
+                    nextTick(() => {
+                        const msgList = document.getElementById('messages-list')
+                        this.scrollBottom(msgList)
+                    })
+                    if (this.currentDiscussion.title === '' || this.currentDiscussion.title === null) {
+                        if (msgObj.type == "input_message_infos") {
+                            // This is a user input
+                            this.changeTitleUsingUserMSG(this.currentDiscussion.id, msgObj.message)
+                        }
+                    }
+                    console.log("infos", msgObj)
+                }
+                else{
+                    this.$refs.toast.showToast("It seems that no model has been loaded. Please download and install a model first, then try again.",4, false)
+                    this.isGenerating = false
+                    this.setDiscussionLoading(this.currentDiscussion.id, this.isGenerating)
+                    this.chime.play()
+                }
},
sendMsg(msg) {
// Sends message to backend
@@ -746,15 +748,14 @@ export default {
            this.chime.play()
        },
        copyToClipBoard(content) {
-            this.isCopiedToClipboard = true
+            this.$refs.toast.showToast("Copied to clipboard successfully")
            nextTick(() => {
                feather.replace()
            })
        },
        closeToast() {
-            this.isCopiedToClipboard = false
+            this.showToast = false
        },


@@ -360,7 +360,7 @@ export default {
            console.log("received something");
            if (response.status === 'progress') {
                console.log(`Progress = ${response.progress}`);
-                this.progress = response.progress;
+                model_object.progress = response.progress
            } else if (response.status === 'succeeded') {
                socket.off('install_progress', progressListener);
                // Update the isInstalled property of the corresponding model
@@ -373,7 +373,7 @@
                // Installation failed or encountered an error
                model_object.installing = false;
                this.showProgress = false;
-                console.error('Installation failed:', message.error);
+                console.error('Installation failed:', response.error);
            }
        };
@@ -385,7 +385,7 @@ export default {
        console.log("uninstalling model...")
        const progressListener = (response) => {
            if (response.status === 'progress') {
-                this.progress = message.progress;
+                this.progress = response.progress;
            } else if (response.status === 'succeeded') {
                console.log(model_object)
                // Installation completed
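These listeners consume 'install_progress' events whose payload carries a status ('progress', 'succeeded', or 'failed') plus a progress percentage or an error string, matching the Python emits earlier in this commit. A minimal Flask-SocketIO sketch of the emitting side (the helper names are illustrative, not from the repository):

from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app)

def report_install_progress(progress):
    # Payload shape matches what progressListener reads: status + progress.
    socketio.emit('install_progress', {'status': 'progress', 'progress': progress})

def report_install_result(ok, error=''):
    # 'succeeded' flips the model's isInstalled flag in the UI; 'failed' surfaces response.error.
    if ok:
        socketio.emit('install_progress', {'status': 'succeeded'})
    else:
        socketio.emit('install_progress', {'status': 'failed', 'error': error})

if __name__ == '__main__':
    socketio.run(app)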


@@ -1,88 +0,0 @@
[
{
"md5sum": "81a09a0ddf89690372fc296ff7f625af",
"filename": "ggml-gpt4all-j-v1.3-groovy.bin",
"filesize": "3785248281",
"isDefault": "true",
"bestGPTJ": "true",
"description": "Current best commercially licensable model based on GPT-J and trained by Nomic AI on the latest curated GPT4All dataset."
},
{
"md5sum": "91f886b68fbce697e9a3cd501951e455",
"filename": "ggml-gpt4all-l13b-snoozy.bin",
"filesize": "8136770688",
"bestLlama": "true",
"description": "Current best non-commercially licensable model based on Llama 13b and trained by Nomic AI on the latest curated GPT4All dataset."
},
{
"md5sum": "756249d3d6abe23bde3b1ae272628640",
"filename": "ggml-mpt-7b-chat.bin",
"filesize": "4854401050",
"isDefault": "true",
"bestMPT": "true",
"requires": "2.4.1",
"description": "Current best non-commercially licensable chat model based on MPT and trained by Mosaic ML."
},
{
"md5sum": "879344aaa9d62fdccbda0be7a09e7976",
"filename": "ggml-gpt4all-j-v1.2-jazzy.bin",
"filesize": "3785248281",
"description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v2 GPT4All dataset."
},
{
"md5sum": "61d48a82cb188cceb14ebb8082bfec37",
"filename": "ggml-gpt4all-j-v1.1-breezy.bin",
"filesize": "3785248281",
"description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v1 GPT4All dataset."
},
{
"md5sum": "5b5a3f9b858d33b29b52b89692415595",
"filename": "ggml-gpt4all-j.bin",
"filesize": "3785248281",
"description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v0 GPT4All dataset."
},
{
"md5sum": "29119f8fa11712704c6b22ac5ab792ea",
"filename": "ggml-vicuna-7b-1.1-q4_2.bin",
"filesize": "4212859520",
"description": "A non-commercially licensable model based on Llama 7b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego."
},
{
"md5sum": "95999b7b0699e2070af63bf5d34101a8",
"filename": "ggml-vicuna-13b-1.1-q4_2.bin",
"filesize": "8136770688",
"description": "A non-commercially licensable model based on Llama 13b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego."
},
{
"md5sum": "99e6d129745a3f1fb1121abed747b05a",
"filename": "ggml-wizardLM-7B.q4_2.bin",
"filesize": "4212864640",
"description": "A non-commercially licensable model based on Llama 7b and trained by Microsoft and Peking University."
},
{
"md5sum": "6cb4ee297537c9133bddab9692879de0",
"filename": "ggml-stable-vicuna-13B.q4_2.bin",
"filesize": "8136777088",
"description": "A non-commercially licensable model based on Llama 13b and RLHF trained by Stability AI."
},
{
"md5sum": "120c32a51d020066288df045ef5d52b9",
"filename": "ggml-mpt-7b-base.bin",
"filesize": "4854401028",
"requires": "2.4.1",
"description": "A commercially licensable model base pre-trained by Mosaic ML."
},
{
"md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe",
"filename": "ggml-nous-gpt4-vicuna-13b.bin",
"filesize": "8136777088",
"description": "A non-commercially licensable model based on Vicuna 13b, fine-tuned on ~180,000 instructions, trained by Nous Research."
},
{
"md5sum": "1cfa4958f489f0a0d1ffdf6b37322809",
"filename": "ggml-mpt-7b-instruct.bin",
"filesize": "4854401028",
"requires": "2.4.1",
"description": "A commercially licensable instruct model based on MPT and trained by Mosaic ML."
}
]