This commit is contained in:
Saifeddine ALOUI 2023-11-26 02:33:25 +01:00
parent 14385f6f3d
commit b4c8953598
13 changed files with 179 additions and 132 deletions

View File

@ -111,7 +111,7 @@ def parse_requirements_file(requirements_path):
# ===========================================================
class LoLLMsAPPI(LollmsApplication):
class LoLLMsAPI(LollmsApplication):
def __init__(self, config:LOLLMSConfig, socketio, config_file_path:str, lollms_paths: LollmsPaths) -> None:
super().__init__("Lollms_webui",config, lollms_paths, callback=self.process_chunk, notification_callback=self.notify)
@ -936,6 +936,81 @@ class LoLLMsAPPI(LollmsApplication):
ASCIIColors.green(f"{self.lollms_paths.personal_path}")
def clean_string(self, input_string):
    """Normalizes a chunk of generated text.

    Collapses runs of blank lines into single line breaks, then strips any
    character that is not an ASCII letter, a digit, an accented Latin letter
    (U+00C0-U+017F), whitespace, or ASCII punctuation.

    Args:
        input_string (str): The raw text to clean.

    Returns:
        str: The cleaned text.
    """
    # Remove extra line breaks by replacing multiple consecutive line breaks with a single line break
    cleaned_string = re.sub(r'\n\s*\n', '\n', input_string)
    # Create a string containing all punctuation characters (these are kept)
    punctuation_chars = string.punctuation
    # Build the pattern from raw strings so '\s' is a regex escape rather than
    # a deprecated string-literal escape; the re module itself resolves the
    # '\u00C0'-'\u017F' range (common accented Latin characters).
    pattern = r'[^a-zA-Z0-9\u00C0-\u017F\s' + re.escape(punctuation_chars) + r']'
    # Use re.sub to replace the matched (disallowed) characters with an empty string
    cleaned_string = re.sub(pattern, '', cleaned_string)
    return cleaned_string
def make_discussion_title(self, discussion, client_id=None):
    """
    Builds a title for a discussion

    Packs as many discussion messages as fit in the remaining context window
    (150 tokens are reserved for generation), appends a
    "!@>Discussion title:" prompt, and streams up to 150 tokens from the
    model to produce a short title.

    Args:
        discussion: Discussion object exposing get_messages().
        client_id: Optional client identifier forwarded to self._generate.

    Returns:
        str: The generated title, with any detected antiprompt text removed.
    """
    # Get the list of messages
    messages = discussion.get_messages()
    discussion_messages = "!@>instruction: Create a short title to this discussion\n"
    discussion_title = "\n!@>Discussion title:"
    # Context budget left after the instruction, the title prompt, and the
    # 150 tokens reserved for the answer
    available_space = self.config.ctx_size - 150 - len(self.model.tokenize(discussion_messages))- len(self.model.tokenize(discussion_title))
    # Initialize a list to store the full messages
    full_message_list = []
    # Accumulate messages until the cumulative number of tokens exceeds available_space
    tokens_accumulated = 0
    # Accumulate messages starting from message_index
    for message in messages:
        # Check if the message content is not empty and visible to the AI
        if message.content != '' and (
                message.message_type <= MSG_TYPE.MSG_TYPE_FULL_INVISIBLE_TO_USER.value and message.message_type != MSG_TYPE.MSG_TYPE_FULL_INVISIBLE_TO_AI.value):
            # Tokenize the message content
            message_tokenized = self.model.tokenize(
                "\n" + self.config.discussion_prompt_separator + message.sender + ": " + message.content.strip())
            # Check if adding the message will exceed the available space
            if tokens_accumulated + len(message_tokenized) > available_space:
                break
            # Add the tokenized message to the full_message_list
            full_message_list.insert(0, message_tokenized)
            # Update the cumulative number of tokens
            tokens_accumulated += len(message_tokenized)
    # Build the final discussion messages by detokenizing the full_message_list
    for message_tokens in full_message_list:
        discussion_messages += self.model.detokenize(message_tokens)
    discussion_messages += discussion_title
    # One-element list so the nested callback can mutate the accumulated text
    title = [""]
    def receive(
            chunk:str,
            message_type:MSG_TYPE
            ):
        # Streaming callback: accumulate chunks; return False to stop generation
        if chunk:
            title[0] += chunk
        antiprompt = self.personality.detect_antiprompt(title[0])
        if antiprompt:
            # The model started echoing a prompt marker: trim it and stop
            ASCIIColors.warning(f"\nDetected hallucination with antiprompt: {antiprompt}")
            title[0] = self.remove_text_from_string(title[0],antiprompt)
            return False
        else:
            return True
    self._generate(discussion_messages, 150, client_id, receive)
    ASCIIColors.info(title[0])
    return title[0]
def rebuild_personalities(self, reload_all=False):
if reload_all:
self.mounted_personalities=[]
@ -1137,80 +1212,6 @@ class LoLLMsAPPI(LollmsApplication):
self.start_time = datetime.now()
def clean_string(self, input_string):
    """Normalizes a chunk of generated text.

    Collapses runs of blank lines into single line breaks, then strips any
    character that is not an ASCII letter, a digit, an accented Latin letter
    (U+00C0-U+017F), whitespace, or ASCII punctuation.

    Args:
        input_string (str): The raw text to clean.

    Returns:
        str: The cleaned text.
    """
    # Remove extra line breaks by replacing multiple consecutive line breaks with a single line break
    cleaned_string = re.sub(r'\n\s*\n', '\n', input_string)
    # Create a string containing all punctuation characters (these are kept)
    punctuation_chars = string.punctuation
    # Build the pattern from raw strings so '\s' is a regex escape rather than
    # a deprecated string-literal escape; the re module itself resolves the
    # '\u00C0'-'\u017F' range (common accented Latin characters).
    pattern = r'[^a-zA-Z0-9\u00C0-\u017F\s' + re.escape(punctuation_chars) + r']'
    # Use re.sub to replace the matched (disallowed) characters with an empty string
    cleaned_string = re.sub(pattern, '', cleaned_string)
    return cleaned_string
def make_discussion_title(self, discussion, client_id=None):
    """
    Builds a title for a discussion

    Packs as many discussion messages as fit in the remaining context window
    (150 tokens are reserved for generation), appends a
    "!@>Discussion title:" prompt, and streams up to 150 tokens from the
    model to produce a short title.

    Args:
        discussion: Discussion object exposing get_messages().
        client_id: Optional client identifier forwarded to self._generate.

    Returns:
        str: The generated title, with any detected antiprompt text removed.
    """
    # Get the list of messages
    messages = discussion.get_messages()
    discussion_messages = "!@>instruction: Create a short title to this discussion\n"
    discussion_title = "\n!@>Discussion title:"
    # Context budget left after the instruction, the title prompt, and the
    # 150 tokens reserved for the answer
    available_space = self.config.ctx_size - 150 - len(self.model.tokenize(discussion_messages))- len(self.model.tokenize(discussion_title))
    # Initialize a list to store the full messages
    full_message_list = []
    # Accumulate messages until the cumulative number of tokens exceeds available_space
    tokens_accumulated = 0
    # Accumulate messages starting from message_index
    for message in messages:
        # Check if the message content is not empty and visible to the AI
        if message.content != '' and (
                message.message_type <= MSG_TYPE.MSG_TYPE_FULL_INVISIBLE_TO_USER.value and message.message_type != MSG_TYPE.MSG_TYPE_FULL_INVISIBLE_TO_AI.value):
            # Tokenize the message content
            message_tokenized = self.model.tokenize(
                "\n" + self.config.discussion_prompt_separator + message.sender + ": " + message.content.strip())
            # Check if adding the message will exceed the available space
            if tokens_accumulated + len(message_tokenized) > available_space:
                break
            # Add the tokenized message to the full_message_list
            full_message_list.insert(0, message_tokenized)
            # Update the cumulative number of tokens
            tokens_accumulated += len(message_tokenized)
    # Build the final discussion messages by detokenizing the full_message_list
    for message_tokens in full_message_list:
        discussion_messages += self.model.detokenize(message_tokens)
    discussion_messages += discussion_title
    # One-element list so the nested callback can mutate the accumulated text
    title = [""]
    def receive(
            chunk:str,
            message_type:MSG_TYPE
            ):
        # Streaming callback: accumulate chunks; return False to stop generation
        if chunk:
            title[0] += chunk
        antiprompt = self.personality.detect_antiprompt(title[0])
        if antiprompt:
            # The model started echoing a prompt marker: trim it and stop
            ASCIIColors.warning(f"\nDetected hallucination with antiprompt: {antiprompt}")
            title[0] = self.remove_text_from_string(title[0],antiprompt)
            return False
        else:
            return True
    self._generate(discussion_messages, 150, client_id, receive)
    ASCIIColors.info(title[0])
    return title[0]
def prepare_query(self, client_id: str, message_id: int = -1, is_continue: bool = False, n_tokens: int = 0) -> Tuple[str, str, List[str]]:
"""
@ -1913,7 +1914,7 @@ def parse_requirements_file(requirements_path):
# ===========================================================
class LoLLMsAPPI(LollmsApplication):
class LoLLMsAPI(LollmsApplication):
def __init__(self, config:LOLLMSConfig, socketio, config_file_path:str, lollms_paths: LollmsPaths) -> None:
super().__init__("Lollms_webui",config, lollms_paths, callback=self.process_chunk, notification_callback=self.notify)
@ -2937,17 +2938,6 @@ class LoLLMsAPPI(LollmsApplication):
print("Couldn't download file:", str(e))
def prepare_reception(self, client_id):
    """Resets the per-client generation state before a new generation starts.

    Args:
        client_id: Key into self.connections identifying the client session.
    """
    # Keep the already generated text when the client asked to continue a message
    if not self.connections[client_id]["continuing"]:
        self.connections[client_id]["generated_text"] = ""
    # NOTE(review): indentation was lost in this view; assumed only the
    # generated_text reset is conditional — confirm against the repository.
    self.connections[client_id]["first_chunk"]=True
    self.nb_received_tokens = 0
    self.start_time = datetime.now()
def clean_string(self, input_string):
# Remove extra spaces by replacing multiple spaces with a single space
@ -3021,8 +3011,16 @@ class LoLLMsAPPI(LollmsApplication):
self._generate(discussion_messages, 150, client_id, receive)
ASCIIColors.info(title[0])
return title[0]
def prepare_reception(self, client_id):
    """Resets the per-client generation state before a new generation starts.

    Args:
        client_id: Key into self.connections identifying the client session.
    """
    # Keep the already generated text when the client asked to continue a message
    if not self.connections[client_id]["continuing"]:
        self.connections[client_id]["generated_text"] = ""
    # NOTE(review): indentation was lost in this view; assumed only the
    # generated_text reset is conditional — confirm against the repository.
    self.connections[client_id]["first_chunk"]=True
    self.nb_received_tokens = 0
    self.start_time = datetime.now()
def prepare_query(self, client_id: str, message_id: int = -1, is_continue: bool = False, n_tokens: int = 0, generation_type = None) -> Tuple[str, str, List[str]]:
"""
@ -3615,6 +3613,18 @@ class LoLLMsAPPI(LollmsApplication):
ASCIIColors.success(f" ╔══════════════════════════════════════════════════╗ ")
ASCIIColors.success(f" ║ Done ║ ")
ASCIIColors.success(f" ╚══════════════════════════════════════════════════╝ ")
if self.config.auto_title:
d = self.connections[client_id]["current_discussion"]
ttl = d.title()
if ttl is None or ttl=="" or ttl=="untitled":
title = self.make_discussion_title(d, client_id=client_id)
d.rename(title)
self.socketio.emit('disucssion_renamed',{
'status': True,
'discussion_id':d.id,
'title':title
}, room=request.sid)
self.busy=False
else:

View File

@ -656,6 +656,17 @@ class Discussion:
f"UPDATE discussion SET title=? WHERE id=?",(new_title,self.discussion_id)
)
def title(self):
    """Gets the title of this discussion.

    Returns:
        str: The title currently stored in the database for this discussion.
    """
    # NOTE(review): discussion_id comes from internal state, but a
    # parameterized query (as the rename query above uses) would be safer
    # than f-string interpolation — confirm the select wrapper supports it.
    rows = self.discussions_db.select(
        f"Select title from discussion WHERE id={self.discussion_id}"
    )
    return rows[0][0]
def delete_discussion(self):
"""Deletes the discussion
"""

5
app.py
View File

@ -87,7 +87,7 @@ try:
import pkg_resources
from api.config import load_config
from api import LoLLMsAPPI
from api import LoLLMsAPI
import shutil
import socket
from api.db import DiscussionsDB, Discussion
@ -189,7 +189,7 @@ try:
class LoLLMsWebUI(LoLLMsAPPI):
class LoLLMsWebUI(LoLLMsAPI):
def __init__(self, args, _app, _socketio, config:LOLLMSConfig, config_file_path:Path|str, lollms_paths:LollmsPaths) -> None:
self.args = args
if config.auto_update:
@ -1712,6 +1712,7 @@ try:
ASCIIColors.info("")
ASCIIColors.info("")
run_update_script(self.args)
sys.exit()
def get_current_personality_files_list(self):
if self.personality is None:

View File

@ -1,5 +1,5 @@
# =================== Lord Of Large Language Models Configuration file ===========================
version: 27
version: 28
binding_name: null
model_name: null
@ -43,6 +43,7 @@ db_path: database.db
debug: False
auto_update: true
auto_save: true
auto_title: false
# Enables gpu usage
enable_gpu: true
# Automatically open the browser

@ -1 +1 @@
Subproject commit 10af0b25f2b385c85ad00179f95b59840261433c
Subproject commit 1accdb2f273e4a80b9310648d03faa01aee86cf3

@ -1 +1 @@
Subproject commit 7b5dae5940fb456792af7db6cf28b50284fb9120
Subproject commit da6cb519dad5d845b4e31a42bab412c305449dbd

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

4
web/dist/index.html vendored
View File

@ -6,8 +6,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LoLLMS WebUI - Welcome</title>
<script type="module" crossorigin src="/assets/index-6f13db08.js"></script>
<link rel="stylesheet" href="/assets/index-d513444c.css">
<script type="module" crossorigin src="/assets/index-d475a66e.js"></script>
<link rel="stylesheet" href="/assets/index-941c6b8c.css">
</head>
<body>
<div id="app"></div>

View File

@ -1786,10 +1786,17 @@ export default {
this.$nextTick(() => {
feather.replace();
});
socket.on('disucssion_renamed',()=>{
})
socket.onclose = (event) => {
console.log('WebSocket connection closed:', event.code, event.reason);
this.socketIODisconnected();
console.log('Received new title', event.id, event.title);
/*
{
'status': True,
'discussion_id':d.id,
'title':title
}*/
};
socket.on("connect_error", (error) => {
if (error.message === "ERR_CONNECTION_REFUSED") {

View File

@ -348,6 +348,23 @@
</div>
</td>
</tr>
<tr>
<td style="min-width: 200px;">
<label for="auto_title" class="text-sm font-bold" style="margin-right: 1rem;">Auto title:</label>
</td>
<td>
<div class="flex flex-row">
<input
type="checkbox"
id="auto_title"
required
v-model="configFile.auto_title"
@change="settingsChanged=true"
class="mt-1 px-2 py-1 border border-gray-300 rounded dark:bg-gray-600"
>
</div>
</td>
</tr>
</table>
</Card>
<Card title="User" :is_subcard="true" class="pb-2 m-2">

@ -1 +1 @@
Subproject commit 48d36e5aa4e355ccbe77ae3e99681fdf2a75e73c
Subproject commit 836732fd0c2892fc00e6e98bd1452df4fed17efa

@ -1 +1 @@
Subproject commit 9e94cb64ef5cd4d69428482fdd5a748ebbfc09ba
Subproject commit 3cd9e5968e61e77825623cf9113c5914cbb1c7cc