synced updates

This commit is contained in:
Saifeddine ALOUI 2024-02-10 11:33:09 +01:00
parent 4280b28462
commit 60ec136237
10 changed files with 436 additions and 255 deletions

View File

@ -1,5 +1,5 @@
# =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
version: 54
version: 55
binding_name: null
model_name: null
@ -121,6 +121,10 @@ data_vectorization_make_persistance: false # If true, the data will be persistan
# Activate internet search
activate_internet_search: false
internet_vectorization_chunk_size: 512 # chunk size
internet_vectorization_overlap_size: 128 # overlap between chunks size
internet_vectorization_nb_chunks: 2 # number of chunks to use
internet_nb_search_pages: 3 # number of pages to select
# Helpers
pdf_latex_path: null

View File

@ -706,6 +706,8 @@ class LOLLMSWebUI(LOLLMSElfServer):
conditionning = self.personality.personality_conditioning
# Check if there are document files to add to the prompt
internet_search_rsults = ""
internet_search_sources = []
documentation = ""
knowledge = ""
@ -739,14 +741,31 @@ class LOLLMSWebUI(LOLLMSElfServer):
fun_mode=""
n_fun_mode = 0
discussion = None
if generation_type != "simple_question":
if self.config.activate_internet_search:
self.personality.callback = partial(self.process_chunk, client_id=client_id)
if discussion is None:
discussion = self.recover_discussion(client_id)[-512:]
self.personality.step_start("Crafting internet search query")
internet_search_rsults="!@>important information: Use the internet search results data to answer the user query. If the data is not present in the search, please tell the user that the information he is asking for was not found and he may need to increase the number of search pages from the settings. It is strictly forbidden to give the user an answer without having actual proof from the documentation.\n!@>Search results:\n"
query = self.personality.fast_gen(f"\n!@>instruction: Read the discussion and craft a web search query suited to recover needed information to answer the user.\nDo not answer the prompt. Do not add explanations.\n!@>discussion:\n{discussion}\n!@>websearch query: ", max_generation_size=256, show_progress=True)
self.personality.step_end("Crafting internet search query")
self.personality.step_start("Performing Internet search")
docs, sorted_similarities = self.personality.internet_search(query)
for doc, infos in zip(docs, sorted_similarities):
internet_search_sources.append(infos[0])
internet_search_rsults += f"search result chunk:\n{doc}"
self.personality.step_end("Performing INternet search")
if self.personality.persona_data_vectorizer:
if documentation=="":
documentation="\n!@>important information: Use the documentation data to answer the user questions. If the data is not present in the documentation, please tell the user that the information he is asking for does not exist in the documentation section. It is strictly forbidden to give the user an answer without having actual proof from the documentation.\n!@>Documentation:\n"
if self.config.data_vectorization_build_keys_words:
discussion = self.recover_discussion(client_id)[-512:]
if discussion is None:
discussion = self.recover_discussion(client_id)[-512:]
query = self.personality.fast_gen(f"\n!@>instruction: Read the discussion and rewrite the last prompt for someone who didn't read the entire discussion.\nDo not answer the prompt. Do not add explanations.\n!@>discussion:\n{discussion}\n!@>enhanced query: ", max_generation_size=256, show_progress=True)
ASCIIColors.cyan(f"Query:{query}")
else:
@ -796,6 +815,16 @@ class LOLLMSWebUI(LOLLMSElfServer):
tokens_conditionning = self.model.tokenize(conditionning)
n_cond_tk = len(tokens_conditionning)
# Tokenize the internet search results text and calculate its number of tokens
if len(internet_search_rsults)>0:
tokens_internet_search_rsults = self.model.tokenize(internet_search_rsults)
n_isearch_tk = len(tokens_internet_search_rsults)
else:
tokens_internet_search_rsults = []
n_isearch_tk = 0
# Tokenize the documentation text and calculate its number of tokens
if len(documentation)>0:
tokens_documentation = self.model.tokenize(documentation)
@ -823,13 +852,13 @@ class LOLLMSWebUI(LOLLMSElfServer):
# Calculate the total number of tokens between conditionning, documentation, and knowledge
total_tokens = n_cond_tk + n_doc_tk + n_history_tk + n_user_description_tk + n_positive_boost + n_negative_boost + n_force_language + n_fun_mode
total_tokens = n_cond_tk + n_isearch_tk + n_doc_tk + n_history_tk + n_user_description_tk + n_positive_boost + n_negative_boost + n_force_language + n_fun_mode
# Calculate the available space for the messages
available_space = self.config.ctx_size - n_tokens - total_tokens
if self.config.debug:
self.info(f"Tokens summary:\nConditionning:{n_cond_tk}\ndoc:{n_doc_tk}\nhistory:{n_history_tk}\nuser description:{n_user_description_tk}\nAvailable space:{available_space}",10)
self.info(f"Tokens summary:\nConditionning:{n_cond_tk}\nn_isearch_tk:{n_isearch_tk}\ndoc:{n_doc_tk}\nhistory:{n_history_tk}\nuser description:{n_user_description_tk}\nAvailable space:{available_space}",10)
# Raise an error if the available space is 0 or less
if available_space<1:
@ -902,7 +931,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
else:
ai_prefix = ""
# Build the final prompt by concatenating the conditionning and discussion messages
prompt_data = conditionning + documentation + knowledge + user_description + discussion_messages + positive_boost + negative_boost + force_language + fun_mode + ai_prefix
prompt_data = conditionning + internet_search_rsults + documentation + knowledge + user_description + discussion_messages + positive_boost + negative_boost + force_language + fun_mode + ai_prefix
# Tokenize the prompt data
tokens = self.model.tokenize(prompt_data)
@ -911,6 +940,8 @@ class LOLLMSWebUI(LOLLMSElfServer):
if self.config["debug"]:
ASCIIColors.bold("CONDITIONNING")
ASCIIColors.yellow(conditionning)
ASCIIColors.bold("INTERNET SEARCH")
ASCIIColors.yellow(internet_search_rsults)
ASCIIColors.bold("DOC")
ASCIIColors.yellow(documentation)
ASCIIColors.bold("HISTORY")
@ -927,6 +958,8 @@ class LOLLMSWebUI(LOLLMSElfServer):
# Details
context_details = {
"conditionning":conditionning,
"internet_search_sources":internet_search_sources,
"internet_search_rsults":internet_search_rsults,
"documentation":documentation,
"knowledge":knowledge,
"user_description":user_description,
@ -940,7 +973,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
}
# Return the prepared query, original message content, and tokenized query
return prompt_data, current_message.content, tokens, context_details
return prompt_data, current_message.content, tokens, context_details, internet_search_sources
def get_discussion_to(self, client_id, message_id=-1):
@ -1345,7 +1378,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
self.update_message(client_id, "✍ warming up ...", msg_type=MSG_TYPE.MSG_TYPE_STEP_START)
# prepare query and reception
self.discussion_messages, self.current_message, tokens, context_details = self.prepare_query(client_id, message_id, is_continue, n_tokens=self.config.min_n_predict, generation_type=generation_type)
self.discussion_messages, self.current_message, tokens, context_details, internet_search_sources = self.prepare_query(client_id, message_id, is_continue, n_tokens=self.config.min_n_predict, generation_type=generation_type)
self.prepare_reception(client_id)
self.generating = True
self.connections[client_id]["processing"]=True
@ -1422,7 +1455,27 @@ class LOLLMSWebUI(LOLLMSElfServer):
self.cancel_gen = False
# Send final message
if self.config.activate_internet_search:
from lollms.internet import get_favicon_url, get_root_url
sources_text = '<div class="mt-4 flex flex-wrap items-center gap-x-2 gap-y-1.5 text-sm ">'
sources_text += '<div class="text-gray-400 mr-10px">Sources:</div>'
for source in internet_search_sources:
url = "/".join(source.split("/")[:-1])
favicon_url = get_favicon_url(url)
if favicon_url is None:
favicon_url ="/personalities/internet/loi/assets/logo.png"
root_url = get_root_url(url)
sources_text += "\n".join([
f'<a class="flex items-center gap-2 whitespace-nowrap rounded-lg border bg-white px-2 py-1.5 leading-none hover:border-gray-300 dark:border-gray-800 dark:bg-gray-900 dark:hover:border-gray-700" target="_blank" href="{url}">',
f'<img class="h-3.5 w-3.5 rounded" src="{favicon_url}">'
f'<div>{root_url}</div>'
f'</a>',
])
sources_text += '</div>'
self.connections[client_id]["generated_text"]=self.connections[client_id]["generated_text"].split("!@>")[0] + "\n" + sources_text
self.personality.full(self.connections[client_id]["generated_text"])
self.close_message(client_id)
self.connections[client_id]["processing"]=False
if self.connections[client_id]["schedule_for_deletion"]:
del self.connections[client_id]

4
web/dist/assets/failed-183609e7.svg vendored Normal file
View File

@ -0,0 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24">
<!-- 24x24 "failed" status icon: an invisible bounding-box path followed by a red X (close) glyph -->
<path d="M0 0h24v24H0z" fill="none"/>
<path d="M19 6.41L17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12 19 6.41z" fill="red"/>
</svg>

After

Width:  |  Height:  |  Size: 266 B

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

4
web/dist/index.html vendored
View File

@ -6,8 +6,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LoLLMS WebUI - Welcome</title>
<script type="module" crossorigin src="/assets/index-24171a58.js"></script>
<link rel="stylesheet" href="/assets/index-f4f40d4b.css">
<script type="module" crossorigin src="/assets/index-5a9814a4.js"></script>
<link rel="stylesheet" href="/assets/index-5504ec28.css">
</head>
<body>
<div id="app"></div>

View File

@ -205,9 +205,17 @@
placeholder="Send message..." @keydown.enter.exact="submitOnEnter($event)">
</textarea>
</div>
<div class="group relative w-max">
<div class="group relative w-12">
<button @click.prevent="toggleSwitch">
<svg width="100" height="50">
<rect x="10" y="15" width="40" height="20" rx="12" ry="12" :fill="config.activate_internet_search ? 'green' : 'red'" />
<circle cx="20" cy="25" r="7" :visibility="config.activate_internet_search ? 'hidden' : 'visible'" />
<circle cx="38" cy="25" r="7" :visibility="config.activate_internet_search ? 'visible' : 'hidden'" />
</svg>
</button>
<div class="pointer-events-none absolute -top-20 left-1/2 w-max -translate-x-1/2 rounded-md bg-gray-100 p-2 opacity-0 transition-opacity group-hover:opacity-100 dark:bg-gray-800"><p class="max-w-sm text-sm text-gray-800 dark:text-gray-200">When enabled, the model will try to complement its answer with information queried from the web.</p></div>
</div>
<div class="group relative w-max">
<button v-if="!loading" type="button" @click="submit" title="Send"
class="w-6 hover:text-secondary duration-75 active:scale-90 cursor-pointer transform transition-transform hover:translate-y-[-5px] active:scale-90">
@ -410,6 +418,32 @@ export default {
}
},
methods: {
toggleSwitch() {
this.$store.state.config.activate_internet_search = !this.$store.state.config.activate_internet_search;
this.isLoading = true;
axios.post('/apply_settings', {"config":this.$store.state.config}).then((res) => {
this.isLoading = false;
//console.log('apply-res',res)
if (res.data.status) {
if(this.$store.state.config.activate_internet_search){
this.$store.state.toast.showToast("Websearch activated.", 4, true)
}
else{
this.$store.state.toast.showToast("Websearch deactivated.", 4, true)
}
this.settingsChanged = false
//this.save_configuration()
} else {
this.$store.state.toast.showToast("Configuration change failed.", 4, false)
}
nextTick(() => {
feather.replace()
})
})
},
showModelConfig(){
try {
this.isLoading = true
@ -589,27 +623,6 @@ export default {
emitloaded(){
this.$emit('loaded')
},
applyConfiguration() {
this.isLoading = true;
axios.post('/apply_settings', {"config":this.configFile}).then((res) => {
this.isLoading = false;
//console.log('apply-res',res)
if (res.data.status) {
this.$store.state.toast.showToast("Configuration changed successfully.", 4, true)
this.settingsChanged = false
//this.save_configuration()
} else {
this.$store.state.toast.showToast("Configuration change failed.", 4, false)
}
nextTick(() => {
feather.replace()
})
})
},
showModels(event){
// Prevent the default button behavior
event.preventDefault();

View File

@ -194,7 +194,8 @@
<details v-show="message != undefined && message.steps != undefined && message.steps.length>0" class="flex w-full cursor-pointer rounded-xl border border-gray-200 bg-white shadow-sm dark:border-gray-800 dark:bg-gray-900 mb-3.5 max-w-full svelte-1escu1z">
<summary class="grid min-w-72 select-none grid-cols-[40px,1fr] items-center gap-2.5 p-2 svelte-1escu1z">
<div class="relative grid aspect-square place-content-center overflow-hidden rounded-lg bg-gray-300 dark:bg-gray-200">
<img v-if="message.status_message!='Done'" :src="loading_svg" class="absolute inset-0 text-gray-100 transition-opacity dark:text-gray-800 opacity-100">
<img v-if="message.status_message!='Done' & message.status_message!= 'Generation canceled'" :src="loading_svg" class="absolute inset-0 text-gray-100 transition-opacity dark:text-gray-800 opacity-100">
<img v-if="message.status_message== 'Generation canceled'" :src="failed_svg" class="absolute inset-0 text-gray-100 transition-opacity dark:text-gray-800 opacity-100">
<img v-if="message.status_message=='Done'" :src="ok_svg" class="absolute m-2 w-6 inset-0 text-geen-100 transition-opacity dark:text-gray-800 opacity-100">
</div>
<dl class="leading-4">
@ -301,6 +302,8 @@ import bash_block from '@/assets/bash_block.png';
import process_svg from '@/assets/process.svg';
import ok_svg from '@/assets/ok.svg';
import failed_svg from '@/assets/failed.svg';
import loading_svg from '@/assets/loading.svg';
@ -336,6 +339,7 @@ export default {
javascript_block:javascript_block,
process_svg:process_svg,
ok_svg:ok_svg,
failed_svg:failed_svg,
loading_svg:loading_svg,
code_block:code_block,
python_block:python_block,

View File

@ -68,6 +68,11 @@
<div class="flex-grow m-2 p-2 border border-blue-300 rounded-md border-2 border-blue-300 m-2 p-4" :class="{ 'border-red-500': generating }">
<div v-if="tab_id === 'source'">
<div class="flex flex-row justify-end mx-2">
<div v-if="editMsgMode" class="text-lg hover:text-secondary duration-75 active:scale-90 p-2 cursor-pointer hover:border-2"
title="Add generic block" @click.stop="addBlock('')">
<img :src="code_block" width="25" height="25">
</div>
<div class="text-lg hover:text-secondary duration-75 active:scale-90 p-2 cursor-pointer"
title="Add python block" @click.stop="addBlock('python')">
<img :src="python_block" width="25" height="25">
@ -222,6 +227,7 @@ import Card from "@/components/Card.vue"
import { nextTick, TransitionGroup } from 'vue'
const bUrl = import.meta.env.VITE_LOLLMS_API_BASEURL
import code_block from '@/assets/code_block.svg';
import python_block from '@/assets/python_block.png';
import javascript_block from '@/assets/javascript_block.svg';
import json_block from '@/assets/json_block.png';
@ -391,6 +397,7 @@ export default {
LaTeX_block:LaTeX_block,
javascript_block:javascript_block,
json_block:json_block,
code_block:code_block,
python_block:python_block,
bash_block:bash_block,

View File

@ -695,7 +695,103 @@
</table>
</Card>
<Card title="Internet search" :is_subcard="true" class="pb-2 m-2">
<table class="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500">
<tr>
<td style="min-width: 200px;">
<label for="activate_internet_search" class="text-sm font-bold" style="margin-right: 1rem;">Activate internet search:</label>
</td>
<td>
<div class="flex flex-row">
<input
type="checkbox"
id="fun_mode"
required
v-model="configFile.activate_internet_search"
@change="settingsChanged=true"
class="mt-1 px-2 py-1 border border-gray-300 rounded dark:bg-gray-600"
>
</div>
</td>
</tr>
<tr>
<td style="min-width: 200px;">
<label for="internet_vectorization_chunk_size" class="text-sm font-bold" style="margin-right: 1rem;">Internet vectorization chunk size:</label>
</td>
<td>
<div class="flex flex-col">
<input id="internet_vectorization_chunk_size" v-model="configFile.internet_vectorization_chunk_size"
@change="settingsChanged=true"
type="range" min="0" max="64000" step="1"
class="flex-none h-2 mt-14 mb-2 w-full bg-gray-200 rounded-lg appearance-none cursor-pointer dark:bg-gray-700 focus:ring-blue-500 focus:border-blue-500 dark:border-gray-600 dark:placeholder-gray-400 dark:focus:ring-blue-500 dark:focus:border-blue-500">
<input v-model="configFile.internet_vectorization_chunk_size"
type="number"
@change="settingsChanged=true"
class="w-full mt-1 px-2 py-1 border border-gray-300 rounded dark:bg-gray-600"
>
</div>
</td>
</tr>
<tr>
<td style="min-width: 200px;">
<label for="internet_vectorization_overlap_size" class="text-sm font-bold" style="margin-right: 1rem;">Internet vectorization overlap size:</label>
</td>
<td>
<div class="flex flex-col">
<input id="internet_vectorization_overlap_size" v-model="configFile.internet_vectorization_overlap_size"
@change="settingsChanged=true"
type="range" min="0" max="64000" step="1"
class="flex-none h-2 mt-14 mb-2 w-full bg-gray-200 rounded-lg appearance-none cursor-pointer dark:bg-gray-700 focus:ring-blue-500 focus:border-blue-500 dark:border-gray-600 dark:placeholder-gray-400 dark:focus:ring-blue-500 dark:focus:border-blue-500">
<input v-model="configFile.internet_vectorization_overlap_size"
type="number"
@change="settingsChanged=true"
class="w-full mt-1 px-2 py-1 border border-gray-300 rounded dark:bg-gray-600"
>
</div>
</td>
</tr>
<tr>
<td style="min-width: 200px;">
<label for="internet_vectorization_nb_chunks" class="text-sm font-bold" style="margin-right: 1rem;">Internet vectorization number of chunks:</label>
</td>
<td>
<div class="flex flex-col">
<input id="internet_vectorization_nb_chunks" v-model="configFile.internet_vectorization_nb_chunks"
@change="settingsChanged=true"
type="range" min="0" max="64000" step="1"
class="flex-none h-2 mt-14 mb-2 w-full bg-gray-200 rounded-lg appearance-none cursor-pointer dark:bg-gray-700 focus:ring-blue-500 focus:border-blue-500 dark:border-gray-600 dark:placeholder-gray-400 dark:focus:ring-blue-500 dark:focus:border-blue-500">
<input v-model="configFile.internet_vectorization_nb_chunks"
type="number"
@change="settingsChanged=true"
class="w-full mt-1 px-2 py-1 border border-gray-300 rounded dark:bg-gray-600"
>
</div>
</td>
</tr>
<tr>
<td style="min-width: 200px;">
<label for="internet_nb_search_pages" class="text-sm font-bold" style="margin-right: 1rem;">Internet number of search pages:</label>
</td>
<td>
<div class="flex flex-col">
<input id="internet_nb_search_pages" v-model="configFile.internet_nb_search_pages"
@change="settingsChanged=true"
type="range" min="0" max="64000" step="1"
class="flex-none h-2 mt-14 mb-2 w-full bg-gray-200 rounded-lg appearance-none cursor-pointer dark:bg-gray-700 focus:ring-blue-500 focus:border-blue-500 dark:border-gray-600 dark:placeholder-gray-400 dark:focus:ring-blue-500 dark:focus:border-blue-500">
<input v-model="configFile.internet_nb_search_pages"
type="number"
@change="settingsChanged=true"
class="w-full mt-1 px-2 py-1 border border-gray-300 rounded dark:bg-gray-600"
>
</div>
</td>
</tr>
</table>
</Card>
<Card title="Latex" :is_subcard="true" class="pb-2 m-2">
<table class="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500">
<tr>