Moved all personalities to new prompting format

Saifeddine ALOUI 2025-02-17 02:02:16 +01:00
parent 08395cc6c7
commit 3be9275999
10 changed files with 55 additions and 36 deletions
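
The substantive Python change below is that context_details moves from dictionary indexing (context_details["prompt"]) to attribute access (context_details.prompt), i.e. it now behaves like a typed object rather than a plain dict. As orientation, here is a minimal sketch of such a container, assuming a dataclass; the field names are taken from the diff, but the class itself is hypothetical, not the actual lollms implementation:

    from dataclasses import dataclass, field
    from typing import Any, Dict, List

    @dataclass
    class ContextDetails:
        # Field names mirror the attributes used in the diff below;
        # the dataclass itself is a sketch, not the real lollms type.
        prompt: str = ""
        fun_mode: bool = False
        think_first_mode: bool = False
        is_continue: bool = False
        available_space: int = 0
        documentation_entries: List[Dict[str, Any]] = field(default_factory=list)
        skills: List[Any] = field(default_factory=list)
        internet_search_infos: List[Dict[str, str]] = field(default_factory=list)
        function_calls: List[Dict[str, Any]] = field(default_factory=list)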

@@ -1 +1 @@
-Subproject commit 42ea55c4ed09d5e332cbd357df3b28d8f4549ee0
+Subproject commit 862fcc594fb364a11f0bb98447f1278b3c0198ec

@@ -29,6 +29,7 @@ import requests
from lollms.app import LollmsApplication
from lollms.binding import (BindingBuilder, BindingType, LLMBinding,
LOLLMSConfig, ModelBuilder)
+from lollms.function_call import FunctionCall, FunctionType
from lollms.client_session import Client
from lollms.com import LoLLMsCom, NotificationDisplayType, NotificationType
from lollms.config import InstallOption
@@ -1411,7 +1412,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
f"{k}: {v}"
for k, v in self.config.smart_routing_models_description.items()
]+[
"!@>prompt:" + context_details["prompt"],
"!@>prompt:" + context_details.prompt,
"""Given the prompt, which model among the previous list is the most suited and why?
You must answer with json code placed inside the markdown code tag like this:
@@ -1672,23 +1673,23 @@ Don't forget encapsulate the code inside a markdown code tag. This is mandatory.
texts=[
"🚀 Generation Options:\n",
"• Fun Mode: ",
f"{EMOJI_YES if context_details['fun_mode'] else EMOJI_NO}",
f"{EMOJI_YES if context_details.fun_mode else EMOJI_NO}",
"\n",
"• Think First Mode: ",
f"{EMOJI_YES if context_details['think_first_mode'] else EMOJI_NO}",
f"{EMOJI_YES if context_details.think_first_mode else EMOJI_NO}",
"\n",
"• Continuation: ",
f"{EMOJI_YES if context_details['is_continue'] else EMOJI_NO}",
f"{EMOJI_YES if context_details.is_continue else EMOJI_NO}",
"\n",
"🎮 Generating up to ",
f"{min(context_details['available_space'], self.config.max_n_predict)}",
f"{min(context_details.available_space, self.config.max_n_predict)}",
" tokens...",
"\n",
"Available context space: ",
f"{context_details['available_space']}",
f"{context_details.available_space}",
"\n",
"Prompt tokens used: ",
f"{self.config.ctx_size - context_details['available_space']}",
f"{self.config.ctx_size - context_details.available_space}",
"\n",
"Max tokens allowed: ",
f"{self.config.max_n_predict}",
@@ -1724,12 +1725,28 @@ Don't forget encapsulate the code inside a markdown code tag. This is mandatory.
self.generating = True
client.processing = True
try:
-self.generate(
+generation_output = self.generate(
context_details,
client_id=client_id,
is_continue=is_continue,
callback=partial(self.process_data, client_id=client_id),
)
+try:
+    if len(context_details.function_calls)>0:
+        codes = self.personality.extract_code_blocks(generation_output)
+        for code in codes:
+            if code["type"]=="function":
+                infos = json.loads(code["content"])
+                for function_call in context_details.function_calls:
+                    if infos["function_name"]==function_call["name"]:
+                        fc:FunctionCall = function_call["class"]
+                        if fc.function_type == FunctionType.CLASSIC:
+                            self.personality.new_message("")
+                            output = fc.execute(**infos["function_parameters"])
+                            self.personality.set_message_content(output)
+except Exception as ex:
+    trace_exception(ex)
if (
self.tts
and self.config.auto_read
@@ -1810,10 +1827,10 @@ Don't forget encapsulate the code inside a markdown code tag. This is mandatory.
self.cancel_gen = False
sources_text = ""
if len(context_details["documentation_entries"]) > 0:
if len(context_details.documentation_entries) > 0:
sources_text += '<div class="text-gray-400 mr-10px flex items-center gap-2"><i class="fas fa-book"></i>Sources:</div>'
sources_text += '<div class="mt-4 flex flex-col items-start gap-x-2 gap-y-1.5 text-sm">'
for source in context_details["documentation_entries"]:
for source in context_details.documentation_entries:
title = source["document_title"]
path = source["document_path"]
content = source["chunk_content"]
@@ -1838,10 +1855,10 @@ Don't forget encapsulate the code inside a markdown code tag. This is mandatory.
sources_text += "</div>"
self.personality.set_message_html(sources_text)
if len(context_details["skills"]) > 0:
if len(context_details.skills) > 0:
sources_text += '<div class="text-gray-400 mr-10px flex items-center gap-2"><i class="fas fa-brain"></i>Memories:</div>'
sources_text += '<div class="mt-4 w-full flex flex-col items-start gap-x-2 gap-y-1.5 text-sm">'
for ind, skill in enumerate(context_details["skills"]):
for ind, skill in enumerate(context_details.skills):
sources_text += f"""
<div class="source-item w-full">
<div class="flex items-center gap-2 p-2 bg-gray-100 rounded cursor-pointer hover:bg-gray-200"
@@ -1873,7 +1890,7 @@ Don't forget encapsulate the code inside a markdown code tag. This is mandatory.
<div class="grid grid-cols-1 sm:grid-cols-2 md:grid-cols-3 lg:grid-cols-4 gap-4 h-64 overflow-y-auto scrollbar-thin scrollbar-thumb-gray-300 scrollbar-track-gray-100">
"""
for source in context_details["internet_search_infos"]:
for source in context_details.internet_search_infos:
url = source["url"]
title = source["title"]
brief = source["brief"]
@@ -1959,6 +1976,7 @@ Don't forget encapsulate the code inside a markdown code tag. This is mandatory.
ASCIIColors.color_bright_cyan,
]
)
+if self.config.auto_title:
d = client.discussion
ttl = d.title()
@@ -2024,10 +2042,10 @@ Don't forget encapsulate the code inside a markdown code tag. This is mandatory.
)
client.generated_text = ""
ASCIIColors.info(
f"prompt has {self.config.ctx_size-context_details['available_space']} tokens"
f"prompt has {self.config.ctx_size-context_details.available_space} tokens"
)
ASCIIColors.info(
f"warmup for generating up to {min(context_details['available_space'],self.config.max_n_predict if self.config.max_n_predict else self.config.ctx_size)} tokens"
f"warmup for generating up to {min(context_details.available_space,self.config.max_n_predict if self.config.max_n_predict else self.config.ctx_size)} tokens"
)
self.generate(
context_details,
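
The new block in the generation path above scans the model output for code blocks of type "function", parses each as JSON carrying function_name and function_parameters, and executes any mounted function whose name matches. Below is a self-contained sketch of that dispatch, assuming mounted entries are {"name", "class"} dicts as in the diff; the EchoCall class and the payload values are invented for illustration:

    import json

    class EchoCall:
        # Hypothetical stand-in for a mounted FunctionCall of CLASSIC type.
        function_type = "CLASSIC"
        def execute(self, **kwargs):
            return f"echo: {kwargs}"

    mounted = [{"name": "echo", "class": EchoCall()}]

    # One extracted block of type "function", shaped like the output of
    # extract_code_blocks in the diff above.
    block = {
        "type": "function",
        "content": '{"function_name": "echo", "function_parameters": {"text": "hi"}}',
    }

    infos = json.loads(block["content"])
    for entry in mounted:
        if infos["function_name"] == entry["name"]:
            # Mirrors fc.execute(**infos["function_parameters"]) in the diff.
            print(entry["class"].execute(**infos["function_parameters"]))  # echo: {'text': 'hi'}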

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

web/dist/index.html vendored

@@ -6,8 +6,8 @@
<script src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-svg.js"></script>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LoLLMS WebUI</title>
<script type="module" crossorigin src="/assets/index-DuP48K9W.js"></script>
<link rel="stylesheet" crossorigin href="/assets/index-PJ0LfNP6.css">
<script type="module" crossorigin src="/assets/index-CXPlPeJJ.js"></script>
<link rel="stylesheet" crossorigin href="/assets/index-CBXLEN4w.css">
</head>
<body>
<div id="app"></div>

@@ -187,7 +187,7 @@ export default {
unmount() {
console.log("Unmounting")
console.log(this.onUnMount)
-this.onUnMount(this)
+this.onUnMount(this.function_call)
this.mounted=false
},
showHelp() {

@@ -4502,8 +4502,8 @@
:key="'index-' + index + '-' + func.name"
:function_call="func"
:on-mount="mountFunction"
-:on-unmount="unmountFunction"
-:on-remount="remountFunction"
+:on-un-mount="unmountFunction"
+:on-re-mount="remountFunction"
:on-edit="editFunction"
:on-copy-to-custom="copyToCustom"
/>
@@ -4905,7 +4905,7 @@ export default {
mountedFuncArr: [], // List of mounted functions
searchFunction: '', // Search query for functions
searchFunctionInProgress: false, // Loading state for search
-function_category: [], // the current category
+function_category: null, // the current category
funcCatgArr: [], // List of function categories
functionsFiltered: [], // Filtered list of functions
@@ -5105,12 +5105,12 @@ export default {
function_name: func.name,
});
if (response.data.status) {
-this.showMessage('Function mounted successfully', true);
+this.$store.state.messageBox.showMessage('Function mounted successfully', true);
} else {
-this.showMessage('Failed to mount function', false);
+this.$store.state.messageBox.showMessage('Failed to mount function', false);
}
} catch (error) {
-this.showMessage('Error mounting function', false);
+this.$store.state.messageBox.showMessage('Error mounting function', false);
console.error(error);
}
},
@@ -5133,7 +5133,7 @@ export default {
this.$store.state.messageBox.showMessage('Failed to unmount function', false);
}
} catch (error) {
-this.showMessage('Error unmounting function', false);
+this.$store.state.messageBox.showMessage('Error unmounting function', false);
console.error(error);
}
},
@@ -5145,22 +5145,23 @@ export default {
client_id: this.$store.state.client_id,
});
if (response.data.status) {
-this.showMessage('All functions unmounted successfully', true);
+this.$store.state.messageBox.showMessage('All functions unmounted successfully', true);
this.$store.dispatch('refreshMountedFunctions');
} else {
-this.showMessage('Failed to unmount all functions', false);
+this.$store.state.messageBox.showMessage('Failed to unmount all functions', false);
}
} catch (error) {
-this.showMessage('Error unmounting all functions', false);
+this.$store.state.messageBox.showMessage('Error unmounting all functions', false);
console.error(error);
}
},
// Update function category
update_function_category(category, refresh) {
console.log("this.function_category")
this.function_category = category;
if (refresh) {
-this.refreshFunctionsZoo();
+this.refreshFunctionsZoo();
}
},

@@ -1 +1 @@
-Subproject commit 48c09177f19183ccf485baa6d29fe17fa204bdaf
+Subproject commit 25fb2e504bc95887ee091a8ae6cd113c53fba7fd

@@ -1 +1 @@
-Subproject commit 98f44bef458ca6725facdec46ac24b27be48df14
+Subproject commit db32844d62c2d920da5a6e455bda035ff83d11b0

@@ -1 +1 @@
-Subproject commit e46da524e83e54409ef3e89137b40814bd081244
+Subproject commit 27100d3decb92d237ee8290aacfccb70a30736f2