added model checking before sending a

saloui 2023-05-14 11:10:49 +02:00
parent 2232bca24b
commit c1a1183b9b
3 changed files with 96 additions and 60 deletions

View File

@@ -38,6 +38,7 @@ class ModelProcess:
         self.started_queue = mp.Queue()
         self.process = None
         self.is_generating = mp.Value('i', 0)
+        self.model_ready = mp.Value('i', 0)
         self.ready = False
 
     def load_backend(self, backend_path):
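Aside: the new `model_ready` flag uses `multiprocessing.Value`, a small shared integer that the model process writes and the parent process can poll. A minimal sketch of that pattern, with illustrative names (not taken from this commit):

    import multiprocessing as mp
    import time

    def load_model(ready_flag):
        time.sleep(1)            # stand-in for a slow model load
        ready_flag.value = 1     # publish readiness to other processes

    if __name__ == "__main__":
        ready = mp.Value('i', 0)             # 'i' = C int, 0 = not ready
        p = mp.Process(target=load_model, args=(ready,))
        p.start()
        while ready.value == 0:              # parent polls the shared flag
            time.sleep(0.1)
        print("model ready, safe to accept prompts")
        p.join()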
@@ -103,6 +104,7 @@ class ModelProcess:
             model_file = Path("models")/self.config["backend"]/self.config["model"]
             print(f"Loading model : {model_file}")
             self.model = self.backend(self.config)
+            self.model_ready.value = 1
             print("Model created successfully")
         except Exception as ex:
             print("Couldn't build model")
@ -138,12 +140,15 @@ class ModelProcess:
def _run(self): def _run(self):
self._rebuild_model() self._rebuild_model()
self._rebuild_personality() self._rebuild_personality()
self._generate("I",0,1) if self.model_ready.value == 1:
print() self._generate("I",0,1)
print("Ready to receive data") print()
print(f"Listening on :http://{self.config['host']}:{self.config['port']}") print("Ready to receive data")
else:
print("No model loaded. Waiting for new configuration instructions")
self.ready = True self.ready = True
print(f"Listening on :http://{self.config['host']}:{self.config['port']}")
while True: while True:
try: try:
self._check_set_config_queue() self._check_set_config_queue()
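The loop that follows this change now keeps serving even without a model: it marks itself ready, then polls its configuration queue forever so a later configuration can load one. The polling idiom reduced to its core, with illustrative names:

    import queue

    def serve_forever(set_config_queue):
        # Keep listening even when no model is loaded; a new
        # configuration arriving on the queue can fix that later.
        while True:
            try:
                new_config = set_config_queue.get(timeout=1)
                print("applying new configuration:", new_config)
            except queue.Empty:
                pass  # nothing queued this tick; keep serving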
@@ -342,20 +347,33 @@ class GPT4AllAPI():
         @socketio.on('generate_msg')
         def generate_msg(data):
-            if self.current_discussion is None:
-                if self.db.does_last_discussion_have_messages():
-                    self.current_discussion = self.db.create_discussion()
-                else:
-                    self.current_discussion = self.db.load_last_discussion()
-
-            message = data["prompt"]
-            message_id = self.current_discussion.add_message(
-                "user", message, parent=self.message_id
-            )
-            self.current_user_message_id = message_id
-            tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))
-            tpe.start()
+            if self.process.model_ready.value==1:
+                if self.current_discussion is None:
+                    if self.db.does_last_discussion_have_messages():
+                        self.current_discussion = self.db.create_discussion()
+                    else:
+                        self.current_discussion = self.db.load_last_discussion()
+
+                message = data["prompt"]
+                message_id = self.current_discussion.add_message(
+                    "user", message, parent=self.message_id
+                )
+                self.current_user_message_id = message_id
+                tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))
+                tpe.start()
+            else:
+                self.socketio.emit('infos',
+                        {
+                            "status":'model_not_ready',
+                            "type": "input_message_infos",
+                            "bot": self.personality.name,
+                            "user": self.personality.user_name,
+                            "message":"",
+                            "user_message_id": self.current_user_message_id,
+                            "ai_message_id": self.current_ai_message_id,
+                        }
+                )
 
         @socketio.on('generate_msg_from')
         def handle_connection(data):
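The shape of the guard above is worth noting: check the shared readiness flag first, and if generation cannot proceed, emit a status payload instead of silently dropping the request. A hedged sketch of that pattern with Flask-SocketIO (the handler body and names here are illustrative, not the project's full implementation):

    from flask import Flask
    from flask_socketio import SocketIO

    app = Flask(__name__)
    socketio = SocketIO(app)
    model_ready = False  # stand-in for process.model_ready.value

    @socketio.on('generate_msg')
    def generate_msg(data):
        if not model_ready:
            # Tell the client why nothing will be generated.
            socketio.emit('infos', {"status": "model_not_ready"})
            return
        # ... normal generation path: store the message, spawn a worker thread ...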
@ -397,16 +415,19 @@ class GPT4AllAPI():
callback (function, optional): A callback function to be called during the download callback (function, optional): A callback function to be called during the download
with the progress percentage as an argument. Defaults to None. with the progress percentage as an argument. Defaults to None.
""" """
def report_hook(count, block_size, total_size): try:
def report_hook(count, block_size, total_size):
if callback is not None:
percentage = (count * block_size / total_size) * 100
callback(percentage)
urllib.request.urlretrieve(url, installation_path, reporthook=report_hook)
if callback is not None: if callback is not None:
percentage = (count * block_size / total_size) * 100 callback(100.0)
callback(percentage) except:
print("Couldn't download file")
urllib.request.urlretrieve(url, installation_path, reporthook=report_hook)
if callback is not None:
callback(100.0)
def load_backend(self, backend_path): def load_backend(self, backend_path):
# define the full absolute path to the module # define the full absolute path to the module
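`urllib.request.urlretrieve` invokes its `reporthook` with `(block_count, block_size, total_size)`, which is what makes the percentage computation above work. Self-contained, the download helper looks roughly like this (URL and destination below are placeholders):

    import urllib.request

    def download(url, dest, callback=None):
        def report_hook(count, block_size, total_size):
            if callback is not None and total_size > 0:
                callback(min(count * block_size / total_size * 100, 100.0))
        try:
            urllib.request.urlretrieve(url, dest, reporthook=report_hook)
            if callback is not None:
                callback(100.0)
        except Exception:
            print("Couldn't download file")

    # usage: download("https://example.com/model.bin", "models/model.bin", print)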
@@ -544,6 +565,7 @@ class GPT4AllAPI():
             ) # first the content is empty, but we'll fill it at the end
             self.socketio.emit('infos',
                     {
+                        "status":'generation_started',
                         "type": "input_message_infos",
                         "bot": self.personality.name,
                         "user": self.personality.user_name,

View File

@@ -22,14 +22,24 @@ socket.on('disconnect', function() {
     console.log("Disconnected")
 });
 
 socket.on('infos', function(msg) {
-    if(globals.user_msg){
-        globals.user_msg.setSender(msg.user);
-        globals.user_msg.setMessage(msg.message);
-        globals.user_msg.setID(msg.id);
-    }
-    globals.bot_msg.setSender(msg.bot);
-    globals.bot_msg.setID(msg.ai_message_id);
-    globals.bot_msg.messageTextElement.innerHTML = `Generating answer. Please stand by...`;
+    console.log(msg)
+    if(msg["status"]=="generation_started"){
+        if(globals.user_msg){
+            globals.user_msg.setSender(msg.user);
+            globals.user_msg.setMessage(msg.message);
+            globals.user_msg.setID(msg.id);
+        }
+        globals.bot_msg.setSender(msg.bot);
+        globals.bot_msg.setID(msg.ai_message_id);
+        globals.bot_msg.messageTextElement.innerHTML = `Generating answer. Please stand by...`;
+    }
+    else{
+        globals.sendbtn.style.display="block";
+        globals.waitAnimation.style.display="none";
+        globals.stopGeneration.style.display = "none";
+        globals.is_generating = false
+        alert("It seems that no model has been loaded. Please download and install a model first, then try again.");
+    }
 });
 
 socket.on('waiter', function(msg) {
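Because the server now always includes a `status` field in the `infos` payload, any client can branch on it exactly as the handler above does. The same dispatch can be exercised from Python with the python-socketio client, which is handy for testing the server without the web UI (host and port below are assumptions):

    import socketio

    sio = socketio.Client()

    @sio.on('infos')
    def on_infos(msg):
        # Mirror the web UI's branching on the new status field.
        if msg.get("status") == "generation_started":
            print(f"{msg['bot']} is generating (ai_message_id={msg['ai_message_id']})")
        else:  # e.g. "model_not_ready"
            print("No model loaded; download and install a model first.")

    sio.connect('http://localhost:9600')   # assumed host:port
    sio.emit('generate_msg', {"prompt": "Hello"})
    sio.wait()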

View File

@@ -427,40 +427,44 @@ export default {
             // Update previous message with response user data
             //
             // msgObj
+            // "status": "if the model is not ready this will inform the user that he can't prompt the model"
             // "type": "input_message_infos",
             // "bot": self.personality.name,
             // "user": self.personality.user_name,
             // "message":message,#markdown.markdown(message),
             // "user_message_id": self.current_user_message_id,
             // "ai_message_id": self.current_ai_message_id,
 
-            this.updateLastUserMsg(msgObj)
-            // Create response message
-            let responseMessage = {
-                content: "✍ please stand by ...",//msgObj.message,
-                id: msgObj.ai_message_id,
-                parent: msgObj.user_message_id,
-                rank: 0,
-                sender: msgObj.bot,
-                //type: msgObj.type
-            }
-            this.discussionArr.push(responseMessage)
-            nextTick(() => {
-                const msgList = document.getElementById('messages-list')
-                this.scrollBottom(msgList)
-            })
-            if (this.currentDiscussion.title === '' || this.currentDiscussion.title === null) {
-                if (msgObj.type == "input_message_infos") {
-                    // This is a user input
-                    this.changeTitleUsingUserMSG(this.currentDiscussion.id, msgObj.message)
-                }
-            }
-            console.log("infos", msgObj)
+            if(msgObj["status"]=="generation_started"){
+                this.updateLastUserMsg(msgObj)
+                // Create response message
+                let responseMessage = {
+                    content: "✍ please stand by ...",//msgObj.message,
+                    id: msgObj.ai_message_id,
+                    parent: msgObj.user_message_id,
+                    rank: 0,
+                    sender: msgObj.bot,
+                    //type: msgObj.type
+                }
+                this.discussionArr.push(responseMessage)
+                nextTick(() => {
+                    const msgList = document.getElementById('messages-list')
+                    this.scrollBottom(msgList)
+                })
+                if (this.currentDiscussion.title === '' || this.currentDiscussion.title === null) {
+                    if (msgObj.type == "input_message_infos") {
+                        // This is a user input
+                        this.changeTitleUsingUserMSG(this.currentDiscussion.id, msgObj.message)
+                    }
+                }
+                console.log("infos", msgObj)
+            }
+            else{
+                alert("It seems that no model has been loaded. Please download and install a model first, then try again.");
+            }
         },
         sendMsg(msg) {
             // Sends message to backend