This commit is contained in:
saloui 2023-07-06 11:37:12 +02:00
parent ce89787ec3
commit 8d817cc1d2
3 changed files with 38 additions and 18 deletions

View File

@ -381,13 +381,13 @@ class LoLLMsAPPI(LollmsApplication):
ASCIIColors.green("Starting message generation by"+self.personality.name)
task = self.socketio.start_background_task(self.start_message_generation, message, message_id)
self.socketio.sleep(0)
self.socketio.sleep(0.01)
ASCIIColors.info("Started generation task")
#tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))
#tpe.start()
else:
self.socketio.emit("buzzy", {"message":"I am buzzy. Come back later."}, room=self.current_room_id)
self.socketio.sleep(0)
self.socketio.sleep(0.01)
ASCIIColors.warning(f"OOps request {self.current_room_id} refused!! Server buzy")
self.socketio.emit('infos',
{
@ -407,7 +407,7 @@ class LoLLMsAPPI(LollmsApplication):
'finished_generating_at': self.current_discussion.current_message_finished_generating_at,
}, room=self.current_room_id
)
self.socketio.sleep(0)
self.socketio.sleep(0.01)
@socketio.on('generate_msg_from')
def handle_connection(data):
@ -694,7 +694,7 @@ class LoLLMsAPPI(LollmsApplication):
'message_type': MSG_TYPE.MSG_TYPE_FULL.value
}, room=self.current_room_id
)
self.socketio.sleep(0)
self.socketio.sleep(0.01)
self.current_discussion.update_message(self.current_ai_message_id, self.current_generated_text)
# if stop generation is detected then stop
if not self.cancel_gen:
@ -721,7 +721,7 @@ class LoLLMsAPPI(LollmsApplication):
'message_type': message_type.value
}, room=self.current_room_id
)
self.socketio.sleep(0)
self.socketio.sleep(0.01)
return True
# Stream the generated text to the frontend
else:
@ -733,7 +733,7 @@ class LoLLMsAPPI(LollmsApplication):
'message_type': message_type.value
}, room=self.current_room_id
)
self.socketio.sleep(0)
self.socketio.sleep(0.01)
return True
@ -832,7 +832,7 @@ class LoLLMsAPPI(LollmsApplication):
'finished_generating_at': self.current_discussion.current_message_finished_generating_at,
}, room=self.current_room_id
)
self.socketio.sleep(0)
self.socketio.sleep(0.01)
# prepare query and reception
self.discussion_messages, self.current_message = self.prepare_query(message_id, is_continue)
@ -869,7 +869,7 @@ class LoLLMsAPPI(LollmsApplication):
}, room=self.current_room_id
)
self.socketio.sleep(0)
self.socketio.sleep(0.01)
ASCIIColors.success(f" ╔══════════════════════════════════════════════════╗ ")
ASCIIColors.success(f" ║ Done ║ ")

3
app.py
View File

@ -60,7 +60,8 @@ log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
app = Flask("GPT4All-WebUI", static_url_path="/static", static_folder="static")
socketio = SocketIO(app, cors_allowed_origins="*", async_mode='gevent', ping_timeout=1200, ping_interval=4000)
# async_mode='gevent', ping_timeout=1200, ping_interval=120,
socketio = SocketIO(app, cors_allowed_origins="*", async_mode='threading',engineio_options={'websocket_compression': False, 'websocket_ping_interval': 20, 'websocket_ping_timeout': 120, 'websocket_max_queue': 100})
app.config['SECRET_KEY'] = 'secret!'
# Set the logging level to WARNING or higher

View File

@ -236,11 +236,27 @@ export default {
return {
// To be synced with the backend database types
msgTypes: {
MSG_TYPE_NORMAL_USER: 0,
MSG_TYPE_NORMAL_AI: 1,
MSG_TYPE_CONDITIONNING: 2,
MSG_TYPE_HIDDEN: 3,
MSG_TYPE_USER_ONLY: 4
// Messaging
MSG_TYPE_CHUNK: 0, // A chunk of a message (used for classical chat)
MSG_TYPE_FULL: 1, // A full message (for some personality the answer is sent in bulk)
// Informations
MSG_TYPE_EXCEPTION: 2, // An exception occurred
MSG_TYPE_WARNING: 3, // A warning occurred
MSG_TYPE_INFO: 4, // An information to be shown to user
// Steps
MSG_TYPE_STEP: 5, // An instant step (a step that doesn't need time to be executed)
MSG_TYPE_STEP_START: 6, // A step has started (the text contains an explanation of the step done by the personality)
MSG_TYPE_STEP_PROGRESS: 7, // The progress value (the text contains a percentage and can be parsed by the reception)
MSG_TYPE_STEP_END: 8, // A step has been done (the text contains an explanation of the step done by the personality)
//Extra
MSG_TYPE_JSON_INFOS: 9, // A JSON output that is useful for summarizing the process of generation used by personalities like chain of thoughts and tree of thoughts
MSG_TYPE_REF: 10, // References (in form of [text](path))
MSG_TYPE_CODE: 11, // A javascript code to execute
MSG_TYPE_UI: 12 // A vue.js component to show (we need to build some and parse the text to show it)
},
list: [], // Discussion list
tempList: [], // Copy of Discussion list (used for keeping the original list during filtering discussions/searching action)
@ -309,9 +325,8 @@ export default {
if (res) {
// Filter out the user and bot entries
this.discussionArr = res.data.filter((item) =>
item.type == this.msgTypes.MSG_TYPE_NORMAL_AI ||
item.type == this.msgTypes.MSG_TYPE_NORMAL_USER ||
item.type == this.msgTypes.MSG_TYPE_USER_ONLY
item.type == this.msgTypes.MSG_TYPE_CHUNK ||
item.type == this.msgTypes.MSG_TYPE_FULL
)
console.log("this.discussionArr")
console.log(this.discussionArr)
@ -734,7 +749,7 @@ export default {
messageItem.content = msgObj.data
}
else{
if (msgObj.type == 1){
if (msgObj.type == this.msgTypes){
messageItem.steps
}
}
@ -744,6 +759,10 @@ export default {
// this.scrollBottom(msgList)
// })
}
// Force an immediate UI update
this.$nextTick(() => {
// UI updates are rendered here
});
},
async changeTitleUsingUserMSG(id, msg) {