added model checking before sending a message

This commit is contained in:
saloui 2023-05-14 11:10:49 +02:00
parent 2232bca24b
commit c1a1183b9b
3 changed files with 96 additions and 60 deletions

View File

@ -38,6 +38,7 @@ class ModelProcess:
self.started_queue = mp.Queue()
self.process = None
self.is_generating = mp.Value('i', 0)
self.model_ready = mp.Value('i', 0)
self.ready = False
def load_backend(self, backend_path):
@ -103,6 +104,7 @@ class ModelProcess:
model_file = Path("models")/self.config["backend"]/self.config["model"]
print(f"Loading model : {model_file}")
self.model = self.backend(self.config)
self.model_ready.value = 1
print("Model created successfully")
except Exception as ex:
print("Couldn't build model")
@ -138,12 +140,15 @@ class ModelProcess:
def _run(self):
self._rebuild_model()
self._rebuild_personality()
if self.model_ready.value == 1:
self._generate("I",0,1)
print()
print("Ready to receive data")
print(f"Listening on :http://{self.config['host']}:{self.config['port']}")
self.ready = True
else:
print("No model loaded. Waiting for new configuration instructions")
self.ready = True
print(f"Listening on :http://{self.config['host']}:{self.config['port']}")
while True:
try:
self._check_set_config_queue()
@ -342,6 +347,7 @@ class GPT4AllAPI():
@socketio.on('generate_msg')
def generate_msg(data):
if self.process.model_ready.value==1:
if self.current_discussion is None:
if self.db.does_last_discussion_have_messages():
self.current_discussion = self.db.create_discussion()
@ -356,6 +362,18 @@ class GPT4AllAPI():
self.current_user_message_id = message_id
tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))
tpe.start()
else:
self.socketio.emit('infos',
{
"status":'model_not_ready',
"type": "input_message_infos",
"bot": self.personality.name,
"user": self.personality.user_name,
"message":"",
"user_message_id": self.current_user_message_id,
"ai_message_id": self.current_ai_message_id,
}
)
@socketio.on('generate_msg_from')
def handle_connection(data):
@ -397,6 +415,7 @@ class GPT4AllAPI():
callback (function, optional): A callback function to be called during the download
with the progress percentage as an argument. Defaults to None.
"""
try:
def report_hook(count, block_size, total_size):
if callback is not None:
percentage = (count * block_size / total_size) * 100
@ -406,6 +425,8 @@ class GPT4AllAPI():
if callback is not None:
callback(100.0)
except:
print("Couldn't download file")
def load_backend(self, backend_path):
@ -544,6 +565,7 @@ class GPT4AllAPI():
) # first the content is empty, but we'll fill it at the end
self.socketio.emit('infos',
{
"status":'generation_started',
"type": "input_message_infos",
"bot": self.personality.name,
"user": self.personality.user_name,

View File

@ -22,14 +22,24 @@ socket.on('disconnect', function() {
console.log("Disconnected")
});
socket.on('infos', function(msg) {
if(globals.user_msg){
console.log(msg)
if(msg["status"]=="generation_started"){
if(globals.user_msg){
globals.user_msg.setSender(msg.user);
globals.user_msg.setMessage(msg.message);
globals.user_msg.setID(msg.id);
}
globals.bot_msg.setSender(msg.bot);
globals.bot_msg.setID(msg.ai_message_id);
globals.bot_msg.messageTextElement.innerHTML = `Generating answer. Please stand by...`;
}
globals.bot_msg.setSender(msg.bot);
globals.bot_msg.setID(msg.ai_message_id);
globals.bot_msg.messageTextElement.innerHTML = `Generating answer. Please stand by...`;
}
else{
globals.sendbtn.style.display="block";
globals.waitAnimation.style.display="none";
globals.stopGeneration.style.display = "none";
globals.is_generating = false
alert("It seems that no model has been loaded. Please download and install a model first, then try again.");
}
});
socket.on('waiter', function(msg) {

View File

@ -427,14 +427,14 @@ export default {
// Update previous message with response user data
//
// msgObj
//
// "status": "if the model is not ready this will inform the user that he can't promt the model"
// "type": "input_message_infos",
// "bot": self.personality.name,
// "user": self.personality.user_name,
// "message":message,#markdown.markdown(message),
// "user_message_id": self.current_user_message_id,
// "ai_message_id": self.current_ai_message_id,
if(msg["status"]=="generation_started"){
this.updateLastUserMsg(msgObj)
// Create response message
let responseMessage = {
@ -461,6 +461,10 @@ export default {
}
}
console.log("infos", msgObj)
}
else{
alert("It seems that no model has been loaded. Please download and install a model first, then try again.");
}
},
sendMsg(msg) {
// Sends message to backend