diff --git a/api/__init__.py b/api/__init__.py
index ddd61a40..3ef0618b 100644
--- a/api/__init__.py
+++ b/api/__init__.py
@@ -1429,6 +1429,14 @@ class LoLLMsAPI(LollmsApplication):
         self.nb_received_tokens = 0
         self.start_time = datetime.now()
 
+    def recover_discussion(self,client_id, message_index=-1):
+        messages = self.connections[client_id]["current_discussion"].get_messages()
+        discussion=""
+        for msg in messages:
+            if message_index!=-1 and msg>message_index:
+                break
+            discussion += "\n" + self.config.discussion_prompt_separator + msg.sender + ": " + msg.content.strip()
+        return discussion
     def prepare_query(self, client_id: str, message_id: int = -1, is_continue: bool = False, n_tokens: int = 0, generation_type = None) -> Tuple[str, str, List[str]]:
         """
         Prepares the query for the model.
@@ -1469,7 +1477,8 @@ class LoLLMsAPI(LollmsApplication):
                 documentation="!@>Documentation:\n"
 
             if self.config.data_vectorization_build_keys_words:
-                query = self.personality.fast_gen("!@>prompt:"+current_message.content+"\n!@>instruction: Convert the prompt to a web search query."+"\nDo not answer the prompt. Do not add explanations. Use comma separated syntax to make a list of keywords in the same line.\nThe keywords should reflect the ideas written in the prompt so that a seach engine can process them efficiently.\n!@>query: ", max_generation_size=256, show_progress=True)
+                discussion = self.recover_discussion(client_id)[-512:]
+                query = self.personality.fast_gen(f"\n!@>instruction: Read the discussion and rewrite the last prompt for someone who didn't read the entire discussion.\nDo not answer the prompt. Do not add explanations.\n!@>discussion:\n{discussion}\n!@>enhanced query: ", max_generation_size=256, show_progress=True)
                 ASCIIColors.cyan(f"Query:{query}")
             else:
                 query = current_message.content
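
For readers who want the query-rewriting step in isolation, below is a minimal, self-contained sketch of what the second hunk does: flatten the discussion into a transcript, keep only its tail, and ask the model to rewrite the last prompt as a standalone query. The `Message` dataclass and the `generate` callable are hypothetical stand-ins for LoLLMs' discussion messages and `self.personality.fast_gen`; the `!@>` separator and the 512-character tail mirror the diff.

```python
from dataclasses import dataclass
from typing import Callable, List


@dataclass
class Message:
    # Hypothetical stand-in for a LoLLMs discussion message.
    sender: str
    content: str


def recover_discussion(messages: List[Message], separator: str = "!@>") -> str:
    """Flatten a message list into a single transcript string."""
    return "".join(
        f"\n{separator}{msg.sender}: {msg.content.strip()}" for msg in messages
    )


def build_standalone_query(
    messages: List[Message],
    generate: Callable[[str], str],
    tail_chars: int = 512,
) -> str:
    """Ask the model to rewrite the last prompt so it stands on its own.

    Only the last `tail_chars` characters of the transcript are included,
    mirroring the `[-512:]` truncation in the diff, so the rewrite prompt
    stays small even for long discussions.
    """
    discussion = recover_discussion(messages)[-tail_chars:]
    prompt = (
        "\n!@>instruction: Read the discussion and rewrite the last prompt "
        "for someone who didn't read the entire discussion.\n"
        "Do not answer the prompt. Do not add explanations.\n"
        f"!@>discussion:\n{discussion}\n!@>enhanced query: "
    )
    return generate(prompt)
```

The rewritten query then replaces `current_message.content` as the input to the vector-store lookup, so follow-up prompts like "and what about the second one?" are expanded with the context they depend on before retrieval runs.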