Enhanced UI

Saifeddine ALOUI 2023-12-04 01:40:36 +01:00
parent 61f2d6bae4
commit 066215ae78
16 changed files with 261 additions and 179 deletions

View File

@@ -165,13 +165,16 @@ class LoLLMsAPI(LollmsApplication):
# This is used to keep track of messages
self.download_infos={}
self.connections = {0:{
self.connections = {
0:{
"current_discussion":None,
"generated_text":"",
"cancel_generation": False,
"generation_thread": None,
"processing":False,
"schedule_for_deletion":False
"schedule_for_deletion":False,
"continuing": False,
"first_chunk": True,
}
}
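The two added flags extend the per-client state: `continuing` marks a generation that resumes an existing message, and `first_chunk` lets the stream handler perform one-time setup when the first token arrives. A minimal, self-contained Python sketch of how such flags are typically consumed (the prints stand in for the real socketio emits; they are illustrative, not from this commit):

def on_chunk(connections: dict, client_id: int, chunk: str) -> None:
    state = connections[client_id]
    if state["first_chunk"]:
        # One-time setup per generation (stand-in for a 'new_message' emit)
        print(f"[{client_id}] new message started")
        state["first_chunk"] = False
    state["generated_text"] += chunk  # stand-in for an 'update_message' emit

connections = {0: {"generated_text": "", "first_chunk": True, "continuing": False}}
on_chunk(connections, 0, "Hello")
on_chunk(connections, 0, ", world")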
@@ -207,7 +210,42 @@ class LoLLMsAPI(LollmsApplication):
ASCIIColors.error(f'Client {request.sid} disconnected')
@socketio.on('upgrade_vectorization')
def upgrade_vectorization():
if self.config.data_vectorization_activate and self.config.use_discussions_history:
try:
self.socketio.emit('show_progress')
self.socketio.sleep(0)
ASCIIColors.yellow("0- Detected discussion vectorization request")
folder = self.lollms_paths.personal_databases_path/"vectorized_dbs"
folder.mkdir(parents=True, exist_ok=True)
self.build_long_term_skills_memory()
ASCIIColors.yellow("1- Exporting discussions")
discussions = self.db.export_all_as_markdown_list_for_vectorization()
ASCIIColors.yellow("2- Adding discussions to vectorizer")
index = 0
nb_discussions = len(discussions)
for (title,discussion) in tqdm(discussions):
self.socketio.emit('update_progress',{'value':int(100*(index/nb_discussions))})
self.socketio.sleep(0)
index += 1
if discussion!='':
skill = self.learn_from_discussion(title, discussion)
self.long_term_memory.add_document(title, skill, chunk_size=self.config.data_vectorization_chunk_size, overlap_size=self.config.data_vectorization_overlap_size, force_vectorize=False, add_as_a_bloc=False)
ASCIIColors.yellow("3- Indexing database")
self.long_term_memory.index()
ASCIIColors.yellow("4- Saving database")
self.long_term_memory.save_to_json()
if self.config.data_vectorization_visualize_on_vectorization:
self.long_term_memory.show_document(show_interactive_form=True)
ASCIIColors.yellow("Ready")
except Exception as ex:
ASCIIColors.error(f"Couldn't vectorize database:{ex}")
self.socketio.emit('hide_progress')
self.socketio.sleep(0)
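This handler pairs with three new frontend socket events: `show_progress`, `update_progress`, and `hide_progress`. A hedged sketch of a standalone client that triggers the vectorization and watches the progress stream, using the python-socketio package (the address is an assumption; this commit does not define it):

import socketio  # pip install "python-socketio[client]"

sio = socketio.Client()

@sio.on('show_progress')
def on_show():
    print("vectorization started")

@sio.on('update_progress')
def on_update(data):
    print(f"progress: {data['value']}%")

@sio.on('hide_progress')
def on_hide():
    print("vectorization finished")
    sio.disconnect()

sio.connect('http://localhost:9600')  # assumed lollms-webui address
sio.emit('upgrade_vectorization')     # fires the handler above
sio.wait()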
@socketio.on('cancel_install')
def cancel_install(data):
try:
@@ -475,6 +513,13 @@ class LoLLMsAPI(LollmsApplication):
def new_discussion(data):
client_id = request.sid
title = data["title"]
if self.connections[client_id]["current_discussion"] is not None:
if self.long_term_memory is not None:
title, content = self.connections[client_id]["current_discussion"].export_for_vectorization()
skill = self.learn_from_discussion(title, content)
self.long_term_memory.add_document(title, skill, chunk_size=self.config.data_vectorization_chunk_size, overlap_size=self.config.data_vectorization_overlap_size, force_vectorize=False, add_as_a_bloc=False, add_to_index=True)
ASCIIColors.yellow("4- Saving database")
self.long_term_memory.save_to_json()
self.connections[client_id]["current_discussion"] = self.db.create_discussion(title)
# Get the current timestamp
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
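Creating a discussion now archives the previous one first: the old discussion is exported, distilled via `learn_from_discussion`, and stored in long-term memory before `create_discussion` runs. A toy, self-contained sketch of that archive-then-create flow (the classes below are stand-ins for the real vectorizer and database, not this commit's API):

class ToyMemory:
    # Stand-in for the long_term_memory vectorizer
    def __init__(self):
        self.docs = {}
    def add_document(self, title, text):
        self.docs[title] = text

def new_discussion(memory, current, new_title):
    if current is not None and memory is not None:
        old_title, content = current  # stand-in for export_for_vectorization()
        memory.add_document(old_title, content)
    return {"title": new_title, "messages": []}  # stand-in for db.create_discussion()

memory = ToyMemory()
d = new_discussion(memory, ("old chat", "user: hi\nlollms: hello\n"), "new chat")
print(memory.docs)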
@@ -1435,10 +1480,11 @@ class LoLLMsAPI(LollmsApplication):
def notify(self, content, status=True, client_id=None):
def notify(self, content, status=True, duration=4, client_id=None):
self.socketio.emit('notification', {
'content': content,# self.connections[client_id]["generated_text"],
'status': status
'status': status,
"duration": duration
}, room=client_id
)
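Note the default shift that comes with the wider signature: the frontend used to hard-code the toast length (`showToast(notif.content, 5, notif.status)`), while `notify` now defaults `duration` to 4 and forwards whatever callers pass. A usage sketch from inside any `LoLLMsAPI` handler (assumption: `client_id` comes from `request.sid`, as elsewhere in this file):

# A longer-lived toast for a slow operation; omitting duration keeps the default of 4.
self.notify("Indexing the long-term memory, this can take a while", status=True, duration=15, client_id=client_id)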
@@ -1533,6 +1579,8 @@ class LoLLMsAPI(LollmsApplication):
if msg_type != MSG_TYPE.MSG_TYPE_INFO:
self.connections[client_id]["current_discussion"].update_message(self.connections[client_id]["generated_text"], new_metadata=mtdt, new_ui=ui)
def close_message(self, client_id):
if not self.connections[client_id]["current_discussion"]:
return

View File

@@ -778,4 +778,23 @@ class Discussion:
# Retrieve current rank value for message_id
self.discussions_db.delete("DELETE FROM message WHERE id=?", (message_id,))
def export_for_vectorization(self):
"""
Export all discussions and their messages from the database to a Markdown list format.
Returns:
list: A list of lists representing discussions and their messages in a Markdown format.
Each inner list contains the discussion title and a string representing all
messages in the discussion in a Markdown format.
"""
# Extract the title
title = self.title()
messages = ""
# Iterate through messages in the discussion
for message in self.messages:
sender = message.sender
content = message.content
# Append the sender and content in a Markdown format
messages += f'{sender}: {content}\n'
return title, messages
# ========================================================================================================================
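A self-contained sketch of what the export yields for a two-message discussion (toy message objects; the real method iterates `self.messages` loaded from the database):

class ToyMessage:
    def __init__(self, sender, content):
        self.sender, self.content = sender, content

messages = [ToyMessage("user", "What is RAG?"),
            ToyMessage("lollms", "Retrieval-augmented generation ...")]
title = "RAG basics"
md = "".join(f"{m.sender}: {m.content}\n" for m in messages)
print((title, md))
# ('RAG basics', 'user: What is RAG?\nlollms: Retrieval-augmented generation ...\n')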

app.py
View File

@@ -920,45 +920,18 @@ try:
elif setting_name == "model_name":
ASCIIColors.yellow(f"Changing model to: {data['setting_value']}")
self.config["model_name"]=data['setting_value']
if self.config["model_name"] is not None:
try:
GG = AdvancedGarbageCollector()
GG.safeHardCollect("model", self.binding)
self.model = None
self.binding.model = None
if self.binding:
del self.binding
self.binding = None
to_remove = []
for per in self.mounted_personalities:
if per is not None:
per.model = None
else:
to_remove.append(per)
for per in to_remove:
self.mounted_personalities.remove(per)
if len(self.mounted_personalities)==0:
self.config.personalities= ["generic/lollms"]
self.mount_personality(0)
gc.collect()
self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, self.notify)
self.model = self.binding.build_model()
for per in self.mounted_personalities:
if per is not None:
per.model = self.model
except Exception as ex:
# Catch the exception and get the traceback as a list of strings
traceback_lines = traceback.format_exception(type(ex), ex, ex.__traceback__)
# Join the traceback lines into a single string
traceback_text = ''.join(traceback_lines)
ASCIIColors.error(f"Couldn't load model: [{ex}]")
ASCIIColors.error(traceback_text)
return jsonify({ "status":False, 'error':str(ex)})
else:
ASCIIColors.warning("Trying to set a None model. Please select a model for the binding")
print("update_settings : New model selected")
self.config.save_config()
try:
self.model = None
for per in self.mounted_personalities:
per.model = None
self.model = self.binding.build_model()
if self.model is not None:
ASCIIColors.yellow("New model OK")
for per in self.mounted_personalities:
per.model = self.model
except Exception as ex:
self.notify("It looks like you we couldn't load the model.\nThis can hapen when you don't have enough VRAM. Please restart the program.",False,30)
elif setting_name== "binding_name":
if self.config['binding_name']!= data['setting_value']:
@@ -1013,31 +986,6 @@ try:
self.rebuild_personalities()
if self.config.auto_save:
self.config.save_config()
if self.config.data_vectorization_activate and self.config.use_discussions_history:
try:
ASCIIColors.yellow("0- Detected discussion vectorization request")
folder = self.lollms_paths.personal_databases_path/"vectorized_dbs"
folder.mkdir(parents=True, exist_ok=True)
self.build_long_term_skills_memory()
ASCIIColors.yellow("1- Exporting discussions")
discussions = self.db.export_all_as_markdown_list_for_vectorization()
ASCIIColors.yellow("2- Adding discussions to vectorizer")
for (title,discussion) in tqdm(discussions):
if discussion!='':
skill = self.learn_from_discussion(discussion)
self.long_term_memory.add_document(title, skill, chunk_size=self.config.data_vectorization_chunk_size, overlap_size=self.config.data_vectorization_overlap_size, force_vectorize=False, add_as_a_bloc=False)
ASCIIColors.yellow("3- Indexing database")
self.long_term_memory.index()
ASCIIColors.yellow("4- Saving database")
self.long_term_memory.save_to_json()
if self.config.data_vectorization_visualize_on_vectorization:
self.long_term_memory.show_document(show_interactive_form=True)
ASCIIColors.yellow("Ready")
except Exception as ex:
ASCIIColors.error(f"Couldn't vectorize database:{ex}")
return jsonify({"status":True})
except Exception as ex:
trace_exception(ex)
@@ -1320,8 +1268,15 @@ try:
discussions = self.db.export_all_as_markdown_list_for_vectorization()
ASCIIColors.yellow("2- Adding discussions to vectorizer")
self.notify("Adding discussions to vectorizer",True, None)
for (title,discussion) in discussions:
self.long_term_memory.add_document(title, discussion, chunk_size=self.config.data_vectorization_chunk_size, overlap_size=self.config.data_vectorization_overlap_size, force_vectorize=False, add_as_a_bloc=False)
index = 0
nb_discussions = len(discussions)
for (title,discussion) in tqdm(discussions):
self.socketio.emit('update_progress',{'value':int(100*(index/nb_discussions))})
index += 1
if discussion!='':
skill = self.learn_from_discussion(title, discussion)
self.long_term_memory.add_document(title, skill, chunk_size=self.config.data_vectorization_chunk_size, overlap_size=self.config.data_vectorization_overlap_size, force_vectorize=False, add_as_a_bloc=False)
ASCIIColors.yellow("3- Indexing database")
self.notify("Indexing database",True, None)
self.long_term_memory.index()
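One nit in both progress loops: the percentage is emitted before `index` is incremented, so the last value sent is `100*(n-1)/n` and the bar only completes when `hide_progress` fires. A hedged alternative that reports exactly 100 on the final item:

# Self-contained sketch of the same progress math, emitting after each item.
discussions = [("a", "..."), ("b", "..."), ("c", "...")]
nb_discussions = len(discussions)
for index, (title, discussion) in enumerate(discussions, start=1):
    # ... vectorize `discussion` here ...
    value = int(100 * index / nb_discussions)  # final iteration reports 100
    print(f"update_progress -> {value}%")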

View File

@@ -1,5 +1,5 @@
# =================== Lord Of Large Language Models Configuration file ===========================
version: 28
version: 30
binding_name: null
model_name: null
@@ -59,6 +59,8 @@ audio_silenceTimer: 5000
# Data vectorization
use_discussions_history: false # Activate vectorizing previous conversations
summerize_discussion: false # Activate discussion summary (better, but adds computation time)
max_summary_size: 512 # in tokens
data_vectorization_visualize_on_vectorization: false
use_files: true # Activate using files
data_vectorization_activate: true # To activate/deactivate data vectorization
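The version bump from 28 to 30 tracks three new keys: `summerize_discussion`, `max_summary_size` (in tokens), and `data_vectorization_visualize_on_vectorization`. A minimal sketch of reading them straight from the YAML (the path is an assumption; in the app they arrive through `self.config`):

import yaml

with open("configs/config.yaml") as f:  # assumed path
    cfg = yaml.safe_load(f)
print(cfg["summerize_discussion"], cfg["max_summary_size"])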

@@ -1 +1 @@
Subproject commit bae2a71fa62c4408c311770f2b98bd43f01622bc
Subproject commit dc0c6a69b41d37c683528e12ff8f4c7bfb29b666

@@ -1 +1 @@
Subproject commit 5f17e5f9c3c0d352f992408edfb23990c2afc59b
Subproject commit 425f7614df835e6bb9d01e8be1ea45593beb9b3a

File diff suppressed because one or more lines are too long

web/dist/assets/index-2b89acea.css vendored Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

web/dist/index.html vendored
View File

@@ -6,8 +6,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LoLLMS WebUI - Welcome</title>
<script type="module" crossorigin src="/assets/index-57514158.js"></script>
<link rel="stylesheet" href="/assets/index-29ecfd0d.css">
<script type="module" crossorigin src="/assets/index-33320784.js"></script>
<link rel="stylesheet" href="/assets/index-2b89acea.css">
</head>
<body>
<div id="app"></div>

View File

@@ -364,7 +364,17 @@ export default {
},
computed: {
currentModel() {
return this.$store.state.currentModel;
if(this.$store.state.currentModel!=undefined){
console.log("Model found")
return this.$store.state.currentModel;
}
else{
console.log("No model found")
let obj = {}
obj.name="unknown"
return obj;
}
},
installedModels() {
return this.$store.state.installedModels;
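The computed property now guards against an undefined store value by returning a placeholder object instead of `undefined`, so templates can always read `.name`. The same null-object idea, sketched in Python:

def current_model(store: dict) -> dict:
    # Never hand the view layer an undefined value; fall back to a placeholder.
    return store.get("currentModel") or {"name": "unknown"}

print(current_model({}))                                     # {'name': 'unknown'}
print(current_model({"currentModel": {"name": "Mistral"}}))  # the stored entry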

View File

@@ -226,7 +226,7 @@
</div>
</div>
</transition>
<div v-if="isReady" class="relative flex flex-col flex-grow " >
<div v-if="isReady" class="relative flex flex-col flex-grow" >
<div id="messages-list"
class=" z-0 flex flex-col flex-grow overflow-y-auto scrollbar-thin scrollbar-track-bg-light-tone scrollbar-thumb-bg-light-tone-panel hover:scrollbar-thumb-primary dark:scrollbar-track-bg-dark-tone dark:scrollbar-thumb-bg-dark-tone-panel dark:hover:scrollbar-thumb-primary active:scrollbar-thumb-secondary"
:class="isDragOverChat ? 'pointer-events-none' : ''">
@@ -254,9 +254,8 @@
<div
class="absolute w-full bottom-0 bg-transparent p-10 pt-16 bg-gradient-to-t from-bg-light dark:from-bg-dark from-5% via-bg-light dark:via-bg-dark via-10% to-transparent to-100%">
</div>
<div class=" bottom-0 container flex flex-row items-center justify-center " v-if="currentDiscussion.id">
<div class="bottom-0 flex flex-row items-center justify-center " v-if="currentDiscussion.id">
<ChatBox ref="chatBox"
:loading="isGenerating"
:discussionList="discussionArr"
@@ -284,6 +283,10 @@
@close-dialog="onclosedatabase_selectorDialog"
@choice-validated="onvalidatedatabase_selectorChoice"
/>
<div v-show="progress_visibility" role="status" class="fixed m-0 p-2 left-2 bottom-2 min-w-[24rem] max-w-[24rem] h-20 flex flex-col justify-center items-center pb-4 bg-blue-500 rounded-lg shadow-lg z-50 background-a">
<ProgressBar ref="progress" :progress="progress_value" class="w-full h-4"></ProgressBar>
<p class="text-2xl animate-pulse mt-2 text-white">{{ loading_infos }} ...</p>
</div>
</template>
@@ -372,6 +375,8 @@ export default {
host:"",
// To be synced with the backend database types
msgTypes: {
progress_visibility_val : true,
progress_value : 0,
// Messaging
MSG_TYPE_CHUNK : 0, // A chunk of a message (used for classical chat)
MSG_TYPE_FULL : 1, // A full message (for some personality the answer is sent in bulk)
@@ -433,7 +438,17 @@ export default {
discussion_id: 0,
}
},
methods: {
methods: {
show_progress(data){
this.progress_visibility_val = true;
},
hide_progress(data){
this.progress_visibility_val = false;
},
update_progress(data){
console.log("Progress update")
this.progress_value = data.value;
},
onSettingsBinding() {
try {
this.isLoading = true
@@ -508,27 +523,24 @@ export default {
}
},
toggleLTM(){
async toggleLTM(){
this.$store.state.config.use_discussions_history = !this.$store.state.config.use_discussions_history;
this.applyConfiguration();
await this.applyConfiguration();
socket.emit('upgrade_vectorization');
},
applyConfiguration() {
async applyConfiguration() {
this.loading = true;
axios.post('/apply_settings', {"config":this.$store.state.config}).then((res) => {
this.loading = false;
//console.log('apply-res',res)
if (res.data.status) {
this.$refs.toast.showToast("Configuration changed successfully.", 4, true)
//this.save_configuration()
} else {
this.$refs.toast.showToast("Configuration change failed.", 4, false)
}
nextTick(() => {
feather.replace()
})
}).catch((err)=>{
this.loading = false;
try {
    const res = await axios.post('/apply_settings', {"config":this.$store.state.config})
    //console.log('apply-res',res)
    if (res.data.status) {
        this.$refs.toast.showToast("Configuration changed successfully.", 4, true)
        //this.save_configuration()
    } else {
        this.$refs.toast.showToast("Configuration change failed.", 4, false)
    }
} catch (err) {
    // Restore the error path the old .catch() handled so `loading` can't get stuck
    this.$refs.toast.showToast("Configuration change failed.", 4, false)
} finally {
    this.loading = false;
    nextTick(() => {
        feather.replace()
    })
}
},
save_configuration() {
@@ -1186,7 +1198,7 @@ export default {
const msgList = document.getElementById('messages-list')
this.scrollBottom(msgList)
})
this.$refs.toast.showToast(notif.content, 5, notif.status)
this.$refs.toast.showToast(notif.content, notif.duration, notif.status)
this.chime.play()
},
streamMessageContent(msgObj) {
@@ -1834,6 +1846,10 @@ export default {
// socket responses
socket.on('show_progress', this.show_progress)
socket.on('hide_progress', this.hide_progress)
socket.on('update_progress', this.update_progress)
socket.on('notification', this.notify)
socket.on('new_message', this.new_message)
socket.on('update_message', this.streamMessageContent)
@@ -1909,6 +1925,9 @@ export default {
ProgressBar
},
watch: {
progress_visibility_val(newVal) {
console.log("progress_visibility changed")
},
filterTitle(newVal) {
if (newVal == '') {
this.filterInProgress = true
@@ -1942,6 +1961,11 @@ export default {
},
computed: {
progress_visibility: {
get(){
return this.progress_visibility_val;
}
},
version_info:{
get(){
if(this.$store.state.version!=undefined && this.$store.state.version!="unknown"){
@@ -1952,6 +1976,7 @@ export default {
}
}
},
loading_infos:{
get(){
return this.$store.state.loading_infos;

View File

@@ -46,7 +46,10 @@
class="block min-h-500 p-2.5 w-full text-gray-900 bg-gray-50 rounded-lg border border-gray-300 focus:ring-blue-500 focus:border-blue-500 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500 overflow-y-scroll flex flex-col shadow-lg p-10 pt-0 overflow-y-scroll dark:bg-bg-dark scrollbar-thin scrollbar-track-bg-light-tone scrollbar-thumb-bg-light-tone-panel hover:scrollbar-thumb-primary dark:scrollbar-track-bg-dark-tone dark:scrollbar-thumb-bg-dark-tone-panel dark:hover:scrollbar-thumb-primary active:scrollbar-thumb-secondary"
:rows="4"
:style="{ minHeight: mdRenderHeight + `px` }" placeholder="Enter message here..."
v-model="text">
v-model="text"
@click.prevent="mdTextarea_clicked"
@change.prevent="mdTextarea_changed"
>
</textarea>
<span>Cursor position {{ cursorPosition }}</span>
@@ -300,6 +303,7 @@ export default {
name: 'PlayGroundView',
data() {
return {
mdRenderHeight:300,
selecting_model:false,
tab_id:"source",
generating:false,
@@ -341,8 +345,6 @@ export default {
// Event handler for receiving generated text chunks
socket.on('text_chunk', data => {
this.appendToOutput(data.chunk);
// const text_element = document.getElementById('text_element');
// text_element.scrollTo(0, text_element.scrollHeight);
});
// Event handler for receiving generated text chunks
@@ -447,13 +449,13 @@ export default {
event.preventDefault();
},
text_element_changed(){
console.log("text_element_changed")
this.cursorPosition = this.$refs.text_element.selectionStart;
mdTextarea_changed(){
console.log("mdTextarea_changed")
this.cursorPosition = this.$refs.mdTextarea.selectionStart;
},
text_element_clicked(){
console.log("text_element_clicked")
this.cursorPosition = this.$refs.text_element.selectionStart;
mdTextarea_clicked(){
console.log(`mdTextarea_clicked: ${this.$refs.mdTextarea.selectionStart}`)
this.cursorPosition = this.$refs.mdTextarea.selectionStart;
},
setModel(){
this.selecting_model=true
@@ -545,7 +547,7 @@ export default {
speakChunk();
},
getCursorPosition() {
return this.cursorPosition;
return this.$refs.mdTextarea.selectionStart;
},
appendToOutput(chunk){
this.pre_text += chunk
@@ -583,7 +585,11 @@ export default {
this.pre_text = this.text.substring(0,this.getCursorPosition())
this.post_text = this.text.substring(this.getCursorPosition(), this.text.length)
var prompt = this.text.substring(0,this.getCursorPosition())
console.log(prompt)
console.log(this.text)
console.log(`cursor position :${this.getCursorPosition()}`)
console.log(`pretext:${this.pre_text}`)
console.log(`post_text:${this.post_text}`)
console.log(`prompt:${prompt}`)
// Trigger the 'generate_text' event with the prompt
socket.emit('generate_text', { prompt: prompt, personality: -1, n_predicts: this.n_predicts , n_crop: this.n_crop,
parameters: {
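The playground now generates from the cursor: everything before `selectionStart` becomes the prompt, and the tail is kept aside to be re-appended after generation. The split, sketched standalone in Python:

text = "The quick brown fox jumps"
cursor = 9                       # as reported by the textarea's selectionStart
pre_text, post_text = text[:cursor], text[cursor:]
prompt = pre_text                # the model continues from the cursor position
print(repr(pre_text), repr(post_text))  # 'The quick' ' brown fox jumps'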

View File

@@ -486,6 +486,23 @@
</td>
</tr>
<tr>
<td style="min-width: 200px;">
<label for="summerize_discussion" class="text-sm font-bold" style="margin-right: 1rem;">Activate Continuous Learning from discussions:</label>
</td>
<td>
<div class="flex flex-row">
<input
type="checkbox"
id="summerize_discussion"
required
v-model="configFile.summerize_discussion"
@change="settingsChanged=true"
class="mt-1 px-2 py-1 border border-gray-300 rounded dark:bg-gray-600"
>
</div>
</td>
</tr>
<tr>
<td style="min-width: 200px;">
<label for="data_vectorization_visualize_on_vectorization" class="text-sm font-bold" style="margin-right: 1rem;">show vectorized data:</label>
</td>

@@ -1 +1 @@
Subproject commit 3cc010cad244c56766e02ab4c72abda4b4afe8fa
Subproject commit ccabccd29b4d087bf5f3528dec2da664478aa03d

@@ -1 +1 @@
Subproject commit 67a62dc6bf1d4417da1f2fb2e1c6c9a09f7404f6
Subproject commit be8781ff3d44dd901b706529d0c468a239e4583c