From fab35e47c5f71624acdc8f56c4dee03334885a25 Mon Sep 17 00:00:00 2001
From: Saifeddine ALOUI
Date: Wed, 22 Jan 2025 21:38:22 +0100
Subject: [PATCH] enhanced prompt management

---
 lollms/app.py         |  5 ++++-
 lollms/personality.py | 10 ++++++----
 2 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/lollms/app.py b/lollms/app.py
index 710fb81..9559b88 100644
--- a/lollms/app.py
+++ b/lollms/app.py
@@ -1321,7 +1321,10 @@ Answer directly with the reformulation of the last prompt.
             else:
                 msg = self.ai_custom_header("assistant") + message.content.strip()
         else:
-            msg = self.user_full_header + message.content.strip()
+            if self.config.use_user_name_in_discussions:
+                msg = self.user_full_header + message.content.strip()
+            else:
+                msg = self.user_custom_header("user") + message.content.strip()
 
         msg += self.separator_template
         message_tokenized = self.model.tokenize(msg)

diff --git a/lollms/personality.py b/lollms/personality.py
index 6f9baef..c7a906a 100644
--- a/lollms/personality.py
+++ b/lollms/personality.py
@@ -257,8 +257,7 @@ class AIPersonality:
 
     def build_context(self, context_details, is_continue=False, return_tokens=False):
         # Build the final prompt by concatenating the conditionning and discussion messages
-        prompt_data = self.separator_template.join(
-            [
+        elements = [
             context_details["conditionning"],
             context_details["internet_search_results"],
             context_details["documentation"],
@@ -267,9 +266,12 @@
             context_details["positive_boost"],
             context_details["negative_boost"],
             context_details["fun_mode"],
-            self.ai_full_header if not is_continue else '' if not self.config.use_continue_message else "CONTINUE FROM HERE And do not open a new markdown code tag." + self.separator_template + self.ai_full_header
+            self.ai_full_header if not is_continue else '' if not self.config.use_continue_message \
+            else "CONTINUE FROM HERE And do not open a new markdown code tag." + self.separator_template + self.ai_full_header
         ]
-        )
+
+        # Filter out empty elements and join with separator
+        prompt_data = self.separator_template.join(element for element in elements if element)
         tokens = self.model.tokenize(prompt_data)
         if return_tokens:
             return prompt_data, tokens
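
Note on the build_context change: the join now skips falsy elements, so disabled or empty context sections (internet search results, documentation, boosts, etc.) no longer leave stray separators in the assembled prompt. Below is a minimal standalone sketch (not part of the patch) illustrating that behavior; the element values and the "\n" separator are made up for illustration and are not taken from lollms.

def join_context(elements, separator="\n"):
    # Mirrors the patched line: keep only non-empty elements so the
    # separator never appears between blank sections.
    return separator.join(element for element in elements if element)

parts = [
    "system conditioning",      # stand-in for context_details["conditionning"]
    "",                         # internet search disabled -> empty, dropped
    "retrieved documentation",  # stand-in for context_details["documentation"]
    "",                         # no boost configured -> empty, dropped
    "assistant:",               # stand-in for self.ai_full_header
]
print(join_context(parts))
# Before the patch, joining all five elements would yield
# "system conditioning\n\nretrieved documentation\n\nassistant:"
# (blank lines where sections are empty); after it, only the three
# non-empty sections are joined.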