Mirror of https://github.com/ParisNeo/lollms.git (synced 2025-03-25 13:17:41 +00:00)

Update app.py

parent 9399e4fdd9
commit 517abb77eb
@@ -188,11 +188,18 @@ class LollmsApplication(LoLLMsCom):
        summarized_chunks = []
        for chunk in chunks:
            prompt = "\n".join([
                f"!@>system:",
                "Analyze the following discussion chunk, focusing on the rank of each message to determine the relevance and quality of the information. Create a concise bullet-point summary of the key skills and important information contained in the high-ranking messages.",
                "Ignore negatively-ranked messages and exclude any irrelevant or garbage content. Return only the bullet-point summary without any additional commentary or explanations:",
                "!@>system:",
                "Conduct an in-depth analysis of the provided discussion chunk, taking into account the AI-given ranks for each message. Your objective is to distill the essence of the discussion into actionable insights for future work. Proceed with the following instructions:",
                "1. Review the messages and their corresponding ranks to determine the most relevant and high-quality contributions.",
                "2. Create a bullet-point summary that encapsulates the key skills, knowledge, and crucial information from messages with positive ranks.",
                "3. Extract lessons learned and best practices from the high-ranking messages that can be applied to enhance future projects.",
                "4. Identify any mentioned strategies or approaches that should be avoided, as indicated by their negative impact or low rank, to prevent future missteps.",
                "5. Exclude any messages with negative ranks and any content that is not pertinent or constructive to the analysis.",
                "6. Compile your findings into a succinct bullet-point list, focusing exclusively on the insights gained, without extraneous commentary.",
                "7. Make sure the summary is clear, to the point, and serves as an effective reference for the team's future endeavors.",
                f"{chunk}",
                "!@>analysis:\n"])
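            # The assembled prompt stacks two "!@>system:" instruction blocks,
            # then the raw chunk text, and ends with "!@>analysis:\n" so the
            # model's completion is the requested bullet-point summary.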

            max_tokens = self.config.ctx_size - self.model.get_nb_tokens(prompt)
            if self.config.debug:
                ASCIIColors.yellow(prompt)
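The hunk ends before the chunk is actually summarized or appended to summarized_chunks. A minimal sketch of how the loop body might continue, assuming the binding exposes a generate(prompt, max_tokens) style call (that name and signature are an assumption for illustration, not taken from this commit):

            # Hypothetical continuation (not part of this diff): ask the model
            # for the bullet-point summary and collect it. generate() and its
            # signature are assumptions about the binding's API.
            summary = self.model.generate(prompt, max_tokens)
            summarized_chunks.append(summary.strip())

        # After the loop, the per-chunk summaries could be joined into one text
        # for a final condensation pass if needed.
        summary_text = "\n".join(summarized_chunks)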