Enhanced stuff

Saifeddine ALOUI 2023-11-25 00:03:43 +01:00
parent 7e95ce173e
commit f57c7817ed
8 changed files with 102 additions and 72 deletions


@@ -2641,6 +2641,7 @@ class LoLLMsAPPI(LollmsApplication):
                 self.start_time = datetime.now()
                 self.personality.processor.callback = partial(self.process_chunk, client_id=client_id)
                 self.personality.processor.execute_command(command, parameters)
+                self.close_message(client_id)
             else:
                 self.notify("Non scripted personalities do not support commands",False,client_id)
@@ -2702,13 +2703,14 @@ class LoLLMsAPPI(LollmsApplication):
                 self.notify("Please select a discussion first", False, client_id)
                 return
             id_ = data['id']
+            generation_type = data.get('msg_type',None)
             if id_==-1:
                 message = self.connections[client_id]["current_discussion"].current_message
             else:
                 message = self.connections[client_id]["current_discussion"].load_message(id_)
             if message is None:
                 return
-            self.connections[client_id]['generation_thread'] = threading.Thread(target=self.start_message_generation, args=(message, message.id, client_id))
+            self.connections[client_id]['generation_thread'] = threading.Thread(target=self.start_message_generation, args=(message, message.id, client_id, False, generation_type))
             self.connections[client_id]['generation_thread'].start()

         @socketio.on('continue_generate_msg_from')
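
With this change, the `generate_msg_from` event accepts an optional `msg_type` field that is forwarded to the generation thread. A minimal sketch of a client exercising the new field, using the python-socketio package (the address, prompt, and payload values are illustrative assumptions, not part of the commit):

```python
# Hypothetical client for the updated 'generate_msg_from' event.
# Requires: pip install "python-socketio[client]"
import socketio

sio = socketio.Client()
sio.connect("http://localhost:9600")  # assumed local LoLLMS WebUI address

# The server reads this with data.get('msg_type', None):
#   None (or anything else)  -> full-context generation
#   "simple_question"        -> prompt built from this message alone
sio.emit("generate_msg_from", {
    "prompt": "What is the capital of France?",  # illustrative prompt
    "id": -1,                                    # -1 selects the current message
    "msg_type": "simple_question",
})
sio.disconnect()
```
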
@@ -3009,7 +3011,7 @@ class LoLLMsAPPI(LollmsApplication):
-    def prepare_query(self, client_id: str, message_id: int = -1, is_continue: bool = False, n_tokens: int = 0) -> Tuple[str, str, List[str]]:
+    def prepare_query(self, client_id: str, message_id: int = -1, is_continue: bool = False, n_tokens: int = 0, generation_type = None) -> Tuple[str, str, List[str]]:
         """
         Prepares the query for the model.
@@ -3041,6 +3043,9 @@ class LoLLMsAPPI(LollmsApplication):
         # Check if there are document files to add to the prompt
         documentation = ""
+        history = ""
+
+        if generation_type != "simple_question":
             if self.personality.persona_data_vectorizer:
                 if documentation=="":
                     documentation="!@>Documentation:\n"
@@ -3071,7 +3076,6 @@ class LoLLMsAPPI(LollmsApplication):
                         documentation += f"document chunk:\nchunk path: {infos[0]}\nchunk content:{doc}"

             # Check if there is discussion history to add to the prompt
-            history = ""
             if self.config.use_discussions_history and self.discussions_store is not None:
                 if history=="":
                     documentation="!@>History:\n"
@@ -3141,6 +3145,7 @@ class LoLLMsAPPI(LollmsApplication):
                 tokens_accumulated += len(message_tokenized)

+        if generation_type != "simple_question":
             # Accumulate messages starting from message_index
             for i in range(message_index, -1, -1):
                 message = messages[i]
@@ -3160,6 +3165,22 @@ class LoLLMsAPPI(LollmsApplication):
                     # Add the tokenized message to the full_message_list
                     full_message_list.insert(0, message_tokenized)

+                    # Update the cumulative number of tokens
+                    tokens_accumulated += len(message_tokenized)
+        else:
+            message = messages[message_index]
+
+            # Check if the message content is not empty and visible to the AI
+            if message.content != '' and (
+                    message.message_type <= MSG_TYPE.MSG_TYPE_FULL_INVISIBLE_TO_USER.value and message.message_type != MSG_TYPE.MSG_TYPE_FULL_INVISIBLE_TO_AI.value):
+
+                # Tokenize the message content
+                message_tokenized = self.model.tokenize(
+                    "\n" + self.config.discussion_prompt_separator + message.sender + ": " + message.content.strip())
+
+                # Add the tokenized message to the full_message_list
+                full_message_list.insert(0, message_tokenized)
+
         # Update the cumulative number of tokens
         tokens_accumulated += len(message_tokenized)
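
Condensed out of the class, the new branching in `prepare_query` amounts to the sketch below: the default path walks the whole discussion backwards, while `simple_question` keeps only the message being answered. The message dicts, the `tokenize` callable, and the separator default are stand-ins for the real `Message` objects, model tokenizer, and config; the MSG_TYPE visibility checks are omitted.

```python
# Sketch of the two context-accumulation modes added to prepare_query.
def build_context(messages, message_index, generation_type, tokenize, separator="!@>"):
    full_message_list = []
    tokens_accumulated = 0
    if generation_type != "simple_question":
        # Default mode: walk backwards and accumulate the whole discussion.
        indices = range(message_index, -1, -1)
    else:
        # simple_question mode: keep only the message being answered.
        indices = [message_index]
    for i in indices:
        message = messages[i]
        if message["content"] != '':
            message_tokenized = tokenize(
                "\n" + separator + message["sender"] + ": " + message["content"].strip())
            # insert(0, ...) keeps chronological order despite the backwards walk
            full_message_list.insert(0, message_tokenized)
            tokens_accumulated += len(message_tokenized)
    return full_message_list, tokens_accumulated

# Example with a whitespace "tokenizer":
msgs = [{"sender": "user", "content": "Hi"}, {"sender": "ai", "content": "Hello"}]
print(build_context(msgs, 1, "simple_question", str.split))
```
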
@@ -3217,7 +3238,7 @@ class LoLLMsAPPI(LollmsApplication):
-    def notify(self, content, status, client_id=None):
+    def notify(self, content, status=True, client_id=None):
         self.socketio.emit('notification', {
                                 'content': content,# self.connections[client_id]["generated_text"],
                                 'status': status
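
The only change to `notify` is a default for `status`: existing calls like `self.notify("Select a personality",False,None)` keep working, and success notifications can now drop the second argument. A trivial sketch of the calling convention (the print body is a stand-in for the socketio emit):

```python
def notify(content, status=True, client_id=None):
    # Stand-in for self.socketio.emit('notification', {...})
    print({"content": content, "status": status, "client_id": client_id})

notify("Model loaded")                      # status defaults to True now
notify("Generation failed", False, "c-42")  # explicit failure still works
```
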
@@ -3534,7 +3555,7 @@ class LoLLMsAPPI(LollmsApplication):
             output = ""
         return output

-    def start_message_generation(self, message, message_id, client_id, is_continue=False):
+    def start_message_generation(self, message, message_id, client_id, is_continue=False, generation_type=None):
         if self.personality is None:
             self.notify("Select a personality",False,None)
             return
@@ -3555,7 +3576,7 @@ class LoLLMsAPPI(LollmsApplication):
             self.socketio.sleep(0.01)

         # prepare query and reception
-        self.discussion_messages, self.current_message, tokens = self.prepare_query(client_id, message_id, is_continue)
+        self.discussion_messages, self.current_message, tokens = self.prepare_query(client_id, message_id, is_continue, generation_type=generation_type)
         self.prepare_reception(client_id)
         self.generating = True
         self.connections[client_id]["processing"]=True
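
`generation_type` travels from the socket handler to `start_message_generation` as an extra positional argument in the `threading.Thread` call shown earlier, and from there into `prepare_query` as a keyword argument. A self-contained illustration of that hand-off pattern (function body and values are stand-ins):

```python
import threading

def start_message_generation(message, message_id, client_id,
                             is_continue=False, generation_type=None):
    # Stand-in body: report which mode the worker will run in.
    mode = generation_type or "full_context"
    print(f"[{client_id}] message {message_id}: generating in {mode} mode")

# args must line up positionally with the target's signature.
t = threading.Thread(target=start_message_generation,
                     args=("hello", 42, "client-1", False, "simple_question"))
t.start()
t.join()
```
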

@@ -1 +1 @@
-Subproject commit b5f5386efd6fd0916188f225080e1700288df3a0
+Subproject commit 708548fcd776162aca4abac930c0dcdb6a36d84e

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

web/dist/index.html (vendored)

@@ -6,8 +6,8 @@
     <meta name="viewport" content="width=device-width, initial-scale=1.0">
     <title>LoLLMS WebUI - Welcome</title>
-    <script type="module" crossorigin src="/assets/index-50afe6ea.js"></script>
-    <link rel="stylesheet" href="/assets/index-62d6a6f7.css">
+    <script type="module" crossorigin src="/assets/index-2bec6d32.js"></script>
+    <link rel="stylesheet" href="/assets/index-0e906c43.css">
   </head>
   <body>
     <div id="app"></div>


@@ -76,11 +76,17 @@
                     title="Copy message to clipboard" @click.stop="copyContentToClipboard()">
                     <i data-feather="copy"></i>
                 </div>
-                <div v-if="!editMsgMode && message.sender!=this.$store.state.mountedPers.name" class="text-lg hover:text-secondary duration-75 active:scale-90 p-2"
-                    title="Resend message"
-                    @click.stop="resendMessage()"
+                <div v-if="!editMsgMode && message.sender!=this.$store.state.mountedPers.name" class="text-lg text-red-500 hover:text-secondary duration-75 active:scale-90 p-2"
+                    title="Resend message with full context"
+                    @click.stop="resendMessage('full_context')"
                     :class="{ 'text-5xl': editMsgMode }">
-                    <i data-feather="refresh-cw"></i>
+                    <i data-feather="send"></i>
+                </div>
+                <div v-if="!editMsgMode && message.sender!=this.$store.state.mountedPers.name" class="text-lg hover:text-secondary duration-75 active:scale-90 p-2"
+                    title="Resend message without the full context"
+                    @click.stop="resendMessage('simple_question')"
+                    :class="{ 'text-5xl': editMsgMode }">
+                    <i data-feather="send"></i>
                 </div>
                 <div v-if="!editMsgMode && message.sender==this.$store.state.mountedPers.name" class="text-lg hover:text-secondary duration-75 active:scale-90 p-2"
                     title="Resend message"
@@ -392,8 +398,8 @@ export default {
             this.$emit('updateMessage', this.message.id, this.message.content)
             this.editMsgMode = false
         },
-        resendMessage() {
-            this.$emit('resendMessage', this.message.id, this.message.content)
+        resendMessage(msg_type) {
+            this.$emit('resendMessage', this.message.id, this.message.content, msg_type)
         },
         continueMessage() {
             this.$emit('continueMessage', this.message.id, this.message.content)


@@ -1208,6 +1208,8 @@ export default {
                 }
                 else if(messageItem && msgObj.message_type==this.msgTypes.MSG_TYPE_CHUNK){
                     messageItem.content += msgObj.content
+                } else if (msgObj.message_type == this.msgTypes.MSG_TYPE_STEP){
+                    messageItem.steps.push({"message":msgObj.content,"done":true, "status":true })
                 } else if (msgObj.message_type == this.msgTypes.MSG_TYPE_STEP_START){
                     messageItem.steps.push({"message":msgObj.content,"done":false, "status":true })
                 } else if (msgObj.message_type == this.msgTypes.MSG_TYPE_STEP_END) {
@@ -1445,7 +1447,7 @@ export default {
             })
         },
-        resendMessage(msgId, msg) {
+        resendMessage(msgId, msg, msg_type) {
             nextTick(() => {
                 feather.replace()
@@ -1456,9 +1458,10 @@ export default {
             axios.get('/get_generation_status', {}).then((res) => {
                 if (res) {
                     if (!res.data.status) {
-                        socket.emit('generate_msg_from', { prompt: msg, id: msgId });
+                        socket.emit('generate_msg_from', { prompt: msg, id: msgId, msg_type: msg_type });
                     }
                     else {
+                        this.$refs.toast.showToast("The server is busy. Wait", 4, false)
                         console.log("Already generating");
                     }
                 }
@@ -1846,11 +1849,11 @@ export default {
     async mounted() {
         try {
             const response = await fetch('/get_server_address'); // Replace with the actual endpoint on your Flask server
-            const serverAddress = await response.text();
+            let serverAddress = await response.text();
             if(serverAddress.includes('<')){
                 console.log(`Server address not found`)
-                return "http://localhost:9600/"//process.env.VITE_LOLLMS_API
+                serverAddress = "http://localhost:9600/"//process.env.VITE_LOLLMS_API
             }

@@ -1 +1 @@
-Subproject commit 1ce2660d1076a381177d64aec2495dd97765f488
+Subproject commit bcdf010ad9338f1cc48ff486273e01b62c6af42f