Mirror of https://github.com/ParisNeo/lollms.git (synced 2024-12-18 20:27:58 +00:00)

commit 059f36aae2 ("enhanced")
parent 367111bab7
@@ -1,5 +1,5 @@
 # =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
-version: 123
+version: 124
 binding_name: null
 model_name: null
 model_variant: null
@@ -258,6 +258,7 @@ rag_activate_multi_hops: false # if true, use the multi-hops algorithm for multi-hop RAG
 rag_min_nb_tokens_in_chunk: 10 # this removes any useless junk with fewer than x tokens
 rag_max_n_hops: 3 # the maximum number of hops in multi-hops RAG
 
+contextual_summary: false # if activated, this completely replaces RAG and uses a contextual summary instead
 
 activate_skills_lib: false # activate vectorizing previous conversations
 skills_lib_database_name: "default" # default skills database
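Note: the only functional change in this config hunk is the new contextual_summary switch; the version bump from 123 to 124 just forces a settings migration. Below is a minimal sketch of how such a flag would gate the two retrieval paths. It assumes a parsed config object whose attributes follow the YAML keys above; build_documentation_from_summaries and build_documentation_from_chunks are hypothetical stand-ins for the code added to lollms/app.py in this commit.

    # Hypothetical dispatcher; illustrates the flag, not the app's real entry point.
    def build_documentation(config, query):
        if config.contextual_summary:
            # Contextual summary completely replaces chunk-based RAG.
            return build_documentation_from_summaries(query)
        # Classic chunk retrieval, optionally multi-hop.
        return build_documentation_from_chunks(query, max_hops=config.rag_max_n_hops)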
lollms/app.py (154 changes)
@@ -16,6 +16,7 @@ from lollms.tasks import TasksLibrary
 from safe_store import TextVectorizer, VectorizationMethod, VisualizationMethod
 
 from lollmsvectordb.database_elements.chunk import Chunk
+from lollmsvectordb.vector_database import VectorDatabase
 from typing import Callable
 from pathlib import Path
 from datetime import datetime
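Note: the added lollmsvectordb import backs the new v:VectorDatabase annotation used further down. The sketch below is inferred purely from the calls visible in this diff (search, list_documents, get_document, add_summaries, and chunk objects exposing chunk_id, text, nb_tokens, distance, and doc.title/doc.path); it is not the real lollmsvectordb definition.

    # Inferred-from-this-diff sketch of the vector store surface; not the real API.
    from dataclasses import dataclass
    from typing import List

    @dataclass
    class Doc:
        title: str
        path: str

    @dataclass
    class Chunk:
        chunk_id: int
        text: str
        nb_tokens: int
        distance: float
        doc: Doc

    class VectorDatabase:
        def search(self, query: str, n: int, excluded_ids: List[int]) -> List[Chunk]: ...
        def list_documents(self) -> List[dict]: ...   # [{"title": ..., "path": ...}, ...]
        def get_document(self, document_path: str) -> str: ...
        def add_summaries(self, path: str, summaries: List[dict]) -> None: ...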
@@ -1043,47 +1044,82 @@ class LollmsApplication(LoLLMsCom):
                 "Cite Your Sources: After providing an answer, include the full path to the document where the information was found.",
                 f"{self.start_header_id_template}Documentation{self.end_header_id_template}"])
             documentation += f"{self.separator_template}"
-            results = []
-            recovered_ids=[[]*len(self.active_rag_dbs)]
-            i=0
-            hop_id = 0
-            while( len(results)<self.config.rag_n_chunks and hop_id<self.config.rag_max_n_hops):
-                hop_id +=1
-                for db in self.active_rag_dbs:
-                    v = db["vectorizer"]
-                    r=v.search(query, self.config.rag_n_chunks, recovered_ids[i])
-                    recovered_ids[i].append([rg.chunk_id for rg in r])
-                    if self.config.rag_activate_multi_hops:
-                        r = [rg for rg in r if self.personality.verify_rag_entry(query, rg.content)]
-                    results+=r
-                    i+=1
-                if len(results)>=self.config.rag_n_chunks:
-                    break
-            n_neighbors = self.active_rag_dbs[0]["vectorizer"].n_neighbors
-            sorted_results = sorted(results, key=lambda x: x.distance)[:n_neighbors]
-            for chunk in sorted_results:
-                document_infos = f"{self.separator_template}".join([
-                    f"{self.start_header_id_template}document chunk{self.end_header_id_template}",
-                    f"source_document_title:{chunk.doc.title}",
-                    f"source_document_path:{chunk.doc.path}",
-                    f"content:\n{chunk.text}\n"
-                ])
-                documentation_entries.append({
-                    "document_title":chunk.doc.title,
-                    "document_path":chunk.doc.path,
-                    "chunk_content":chunk.text,
-                    "chunk_size":chunk.nb_tokens,
-                    "distance":chunk.distance,
-                })
-                documentation += document_infos
+            full_documentation=""
+            if self.config.contextual_summary:
+                for db in self.active_rag_dbs:
+                    v:VectorDatabase = db["vectorizer"]
+                    docs = v.list_documents()
+                    for doc in docs:
+                        document=v.get_document(document_path = doc["path"])
+                        self.personality.step_start(f"Summarizing document {doc['path']}")
+                        summary = self.personality.summerize_text(document, f"Extract information from the following text chunk to answer this request. If there is no information about the query, just return an empty string.\n{self.system_custom_header('query')}{query}", callback=self.personality.sink)
+                        self.personality.step_end(f"Summarizing document {doc['path']}")
+                        document_infos = f"{self.separator_template}".join([
+                            self.system_custom_header('document contextual summary'),
+                            f"source_document_title:{doc['title']}",
+                            f"source_document_path:{doc['path']}",
+                            f"content:\n{summary}\n"
+                        ])
+                        documentation_entries.append({
+                            "document_title":doc['title'],
+                            "document_path":doc['path'],
+                            "chunk_content":summary,
+                            "chunk_size":0,
+                            "distance":0,
+                        })
+                        if summary!="":
+                            v.add_summaries(doc['path'],[{"context":query, "summary":summary}])
+                        full_documentation += document_infos
+                documentation += self.personality.summerize_text(full_documentation, f"Extract information from the current text chunk and previous text chunks to answer the query. If there is no information about the query, just return an empty string.\n{self.system_custom_header('query')}{query}", callback=self.personality.sink)
+            else:
+                results = []
+                recovered_ids=[[] for _ in range(len(self.active_rag_dbs))]
+                hop_id = 0
+                while( len(results)<self.config.rag_n_chunks and hop_id<self.config.rag_max_n_hops):
+                    i=0
+                    hop_id +=1
+                    for db in self.active_rag_dbs:
+                        v = db["vectorizer"]
+                        r=v.search(query, self.config.rag_n_chunks, recovered_ids[i])
+                        recovered_ids[i]+=[rg.chunk_id for rg in r]
+                        if self.config.rag_activate_multi_hops:
+                            r = [rg for rg in r if self.personality.verify_rag_entry(query, rg.text)]
+                        results+=r
+                        i+=1
+                    if len(results)>=self.config.rag_n_chunks:
+                        break
+                n_neighbors = self.active_rag_dbs[0]["vectorizer"].n_neighbors
+                sorted_results = sorted(results, key=lambda x: x.distance)[:n_neighbors]
+                for chunk in sorted_results:
+                    document_infos = f"{self.separator_template}".join([
+                        f"{self.start_header_id_template}document chunk{self.end_header_id_template}",
+                        f"source_document_title:{chunk.doc.title}",
+                        f"source_document_path:{chunk.doc.path}",
+                        f"content:\n{chunk.text}\n"
+                    ])
+                    documentation_entries.append({
+                        "document_title":chunk.doc.title,
+                        "document_path":chunk.doc.path,
+                        "chunk_content":chunk.text,
+                        "chunk_size":chunk.nb_tokens,
+                        "distance":chunk.distance,
+                    })
+                    documentation += document_infos
 
         if (len(client.discussion.text_files) > 0) and client.discussion.vectorizer is not None:
             if discussion is None:
                 discussion = self.recover_discussion(client_id)
 
             if documentation=="":
-                documentation=f"{self.separator_template}{self.start_header_id_template}important information: Use the documentation data to answer the user questions. If the data is not present in the documentation, please tell the user that the information he is asking for does not exist in the documentation section. It is strictly forbidden to give the user an answer without having actual proof from the documentation.{self.separator_template}{self.start_header_id_template}Documentation:\n"
+                documentation=f"{self.separator_template}".join([
+                    f"{self.separator_template}{self.start_header_id_template}important information{self.end_header_id_template}Utilize Documentation Data: Always refer to the provided documentation to answer user questions accurately.",
+                    "Absence of Information: If the required information is not available in the documentation, inform the user that the requested information is not present in the documentation section.",
+                    "Strict Adherence to Documentation: It is strictly prohibited to provide answers without concrete evidence from the documentation.",
+                    "Cite Your Sources: After providing an answer, include the full path to the document where the information was found.",
+                    f"{self.start_header_id_template}Documentation{self.end_header_id_template}"])
+                documentation += f"{self.separator_template}"
 
             if query is None:
                 if self.config.data_vectorization_build_keys_words:
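Note: besides introducing the contextual_summary branch, the rewritten else branch fixes two latent bugs: recovered_ids=[[]*len(self.active_rag_dbs)] built a single empty list instead of one exclusion list per database (now a list comprehension), and recovered_ids[i].append([...]) nested a list instead of extending it (now +=). A standalone sketch of the multi-hop loop, where verify_entry is a stand-in for self.personality.verify_rag_entry:

    # Illustrative sketch of the multi-hop retrieval loop; names are stand-ins.
    def multi_hop_search(dbs, query, n_chunks, max_hops, verify_entry=None):
        results = []
        recovered_ids = [[] for _ in dbs]      # per-database ids already returned
        hop = 0
        while len(results) < n_chunks and hop < max_hops:
            hop += 1
            for i, db in enumerate(dbs):
                hits = db.search(query, n_chunks, recovered_ids[i])  # skip seen ids
                recovered_ids[i] += [h.chunk_id for h in hits]
                if verify_entry:               # optional LLM gate per retrieved chunk
                    hits = [h for h in hits if verify_entry(query, h.text)]
                results += hits
            if len(results) >= n_chunks:
                break
        return sorted(results, key=lambda h: h.distance)[:n_chunks]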
@@ -1094,18 +1130,46 @@ class LollmsApplication(LoLLMsCom):
                 else:
                     query = current_message.content
 
-            try:
-                chunks:List[Chunk] = client.discussion.vectorizer.search(query, int(self.config.rag_n_chunks))
-                for chunk in chunks:
-                    if self.config.data_vectorization_put_chunk_informations_into_context:
-                        documentation += f"{self.start_header_id_template}document chunk{self.end_header_id_template}\ndocument title: {chunk.doc.title}\nchunk content:\n{chunk.text}\n"
-                    else:
-                        documentation += f"{self.start_header_id_template}chunk{self.end_header_id_template}\n{chunk.text}\n"
-
-                documentation += f"{self.separator_template}{self.start_header_id_template}important information: Use the documentation data to answer the user questions. If the data is not present in the documentation, please tell the user that the information he is asking for does not exist in the documentation section. It is strictly forbidden to give the user an answer without having actual proof from the documentation.\n"
-            except Exception as ex:
-                trace_exception(ex)
-                self.warning("Couldn't add documentation to the context. Please verify the vector database")
+            full_documentation=""
+            if self.config.contextual_summary:
+                v = client.discussion.vectorizer
+                docs = v.list_documents()
+                for doc in docs:
+                    document=v.get_document(document_path = doc["path"])
+                    self.personality.step_start(f"Summarizing document {doc['path']}")
+                    summary = self.personality.summerize_text(document, f"Extract information from the following text chunk to answer this request. If there is no information about the query, just return an empty string.\n{self.system_custom_header('query')}{query}", callback=self.personality.sink)
+                    self.personality.step_end(f"Summarizing document {doc['path']}")
+                    document_infos = f"{self.separator_template}".join([
+                        self.system_custom_header('document contextual summary'),
+                        f"source_document_title:{doc['title']}",
+                        f"source_document_path:{doc['path']}",
+                        f"content:\n{summary}\n"
+                    ])
+                    documentation_entries.append({
+                        "document_title":doc['title'],
+                        "document_path":doc['path'],
+                        "chunk_content":summary,
+                        "chunk_size":0,
+                        "distance":0,
+                    })
+                    if summary!="":
+                        v.add_summaries(doc['path'],[{"context":query, "summary":summary}])
+                    full_documentation += document_infos
+                documentation += self.personality.summerize_text(full_documentation, f"Extract information from the current text chunk and previous text chunks to answer the query. If there is no information about the query, just return an empty string.\n{self.system_custom_header('query')}{query}", callback=self.personality.sink)
+            else:
+                try:
+                    chunks:List[Chunk] = client.discussion.vectorizer.search(query, int(self.config.rag_n_chunks))
+                    for chunk in chunks:
+                        if self.config.data_vectorization_put_chunk_informations_into_context:
+                            documentation += f"{self.start_header_id_template}document chunk{self.end_header_id_template}\ndocument title: {chunk.doc.title}\nchunk content:\n{chunk.text}\n"
+                        else:
+                            documentation += f"{self.start_header_id_template}chunk{self.end_header_id_template}\n{chunk.text}\n"
+
+                    documentation += f"{self.separator_template}{self.start_header_id_template}important information: Use the documentation data to answer the user questions. If the data is not present in the documentation, please tell the user that the information he is asking for does not exist in the documentation section. It is strictly forbidden to give the user an answer without having actual proof from the documentation.\n"
+                except Exception as ex:
+                    trace_exception(ex)
+                    self.warning("Couldn't add documentation to the context. Please verify the vector database")
 
         # Check if there is discussion knowledge to add to the prompt
         if self.config.activate_skills_lib:
             try:
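Note: the contextual_summary path appears twice (once for the active RAG databases above, once here for per-discussion files), and both copies follow the same map-reduce shape: summarize every document against the query, cache each non-empty summary via add_summaries, then fuse the per-document summaries into one final extraction. A compressed sketch of that shape, where summarize stands in for self.personality.summerize_text:

    # Map-reduce shape of the contextual summary path; summarize() is a stand-in.
    def contextual_documentation(v, query, summarize):
        partials = []
        for doc in v.list_documents():
            text = v.get_document(document_path=doc["path"])
            summary = summarize(text, f"Extract information relevant to: {query}")
            if summary != "":
                v.add_summaries(doc["path"], [{"context": query, "summary": summary}])
                partials.append(summary)
        # Reduce step: fuse all per-document extractions into a single context block.
        return summarize("\n".join(partials), f"Fuse the extracts to answer: {query}")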
@@ -1648,6 +1648,84 @@ class AIPersonality:
         """
         self._processor_cfg = value
 
+
+    # Properties ===============================================
+    @property
+    def start_header_id_template(self) -> str:
+        """Get the start_header_id_template."""
+        return self.config.start_header_id_template
+
+    @property
+    def end_header_id_template(self) -> str:
+        """Get the end_header_id_template."""
+        return self.config.end_header_id_template
+
+    @property
+    def system_message_template(self) -> str:
+        """Get the system_message_template."""
+        return self.config.system_message_template
+
+    @property
+    def separator_template(self) -> str:
+        """Get the separator template."""
+        return self.config.separator_template
+
+    @property
+    def start_user_header_id_template(self) -> str:
+        """Get the start_user_header_id_template."""
+        return self.config.start_user_header_id_template
+
+    @property
+    def end_user_header_id_template(self) -> str:
+        """Get the end_user_header_id_template."""
+        return self.config.end_user_header_id_template
+
+    @property
+    def end_user_message_id_template(self) -> str:
+        """Get the end_user_message_id_template."""
+        return self.config.end_user_message_id_template
+
+    @property
+    def start_ai_header_id_template(self) -> str:
+        """Get the start_ai_header_id_template."""
+        return self.config.start_ai_header_id_template
+
+    @property
+    def end_ai_header_id_template(self) -> str:
+        """Get the end_ai_header_id_template."""
+        return self.config.end_ai_header_id_template
+
+    @property
+    def end_ai_message_id_template(self) -> str:
+        """Get the end_ai_message_id_template."""
+        return self.config.end_ai_message_id_template
+
+    @property
+    def system_full_header(self) -> str:
+        """Get the full system header."""
+        return f"{self.start_header_id_template}{self.system_message_template}{self.end_header_id_template}"
+
+    @property
+    def user_full_header(self) -> str:
+        """Get the full user header."""
+        return f"{self.start_user_header_id_template}{self.config.user_name}{self.end_user_header_id_template}"
+
+    @property
+    def ai_full_header(self) -> str:
+        """Get the full AI header."""
+        return f"{self.start_ai_header_id_template}{self.name}{self.end_ai_header_id_template}"
+
+    def system_custom_header(self, header_name) -> str:
+        """Build a custom system header."""
+        return f"{self.start_user_header_id_template}{header_name}{self.end_user_header_id_template}"
+
+    def ai_custom_header(self, ai_name) -> str:
+        """Build a custom AI header."""
+        return f"{self.start_ai_header_id_template}{ai_name}{self.end_ai_header_id_template}"
+
+
     # ========================================== Helper methods ==========================================
     def detect_antiprompt(self, text:str) -> bool:
         """
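Note: these properties mirror the template accessors the application object already exposes (the app.py hunk above calls self.system_custom_header), so a personality can assemble prompts without reaching into self.config each time. A short usage sketch, assuming p is an AIPersonality instance with the default templates:

    # Assembling a prompt from the new header helpers (p is an AIPersonality).
    prompt = p.separator_template.join([
        p.system_full_header + "You are a helpful assistant.",
        p.user_full_header + "What does multi-hop RAG mean?",
        p.ai_full_header,   # the model generates after its own header
    ])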
@@ -1701,8 +1779,216 @@ class AIPersonality:
         output_string = re.sub(pattern, replace, input_string)
         return output_string
 
+    def verify_rag_entry(self, query, rag_entry):
+        return self.yes_no("Is there any useful information in the document chunk that can be used to answer the query?", self.app.system_custom_header("Query")+query+"\n"+self.app.system_custom_header("document chunk")+"\n"+rag_entry)
+
+    def translate(self, text_chunk, output_language="french", max_generation_size=3000):
+        start_header_id_template = self.config.start_header_id_template
+        end_header_id_template = self.config.end_header_id_template
+        system_message_template = self.config.system_message_template
+
+        translated = self.fast_gen(
+            "\n".join([
+                f"{start_header_id_template}{system_message_template}{end_header_id_template}",
+                f"Translate the following text to {output_language}.",
+                "Be faithful to the original text and do not add or remove any information.",
+                "Respond only with the translated text.",
+                "Do not add comments or explanations.",
+                f"{start_header_id_template}text to translate{end_header_id_template}",
+                f"{text_chunk}",
+                f"{start_header_id_template}translation{end_header_id_template}",
+            ]),
+            max_generation_size=max_generation_size, callback=self.sink)
+        return translated
+
+    def summerize_text(
+        self,
+        text,
+        summary_instruction="summarize",
+        doc_name="chunk",
+        answer_start="",
+        max_generation_size=3000,
+        max_summary_size=512,
+        callback=None,
+        chunk_summary_post_processing=None,
+        summary_mode=SUMMARY_MODE.SUMMARY_MODE_SEQUENCIAL
+    ):
+        tk = self.model.tokenize(text)
+        prev_len = len(tk)
+        document_chunks=None
+        while len(tk)>max_summary_size and (document_chunks is None or len(document_chunks)>1):
+            self.step_start(f"Compressing {doc_name}...")
+            chunk_size = int(self.config.ctx_size*0.6)
+            document_chunks = DocumentDecomposer.decompose_document(text, chunk_size, 0, self.model.tokenize, self.model.detokenize, True)
+            text = self.summerize_chunks(
+                document_chunks,
+                summary_instruction,
+                doc_name,
+                answer_start,
+                max_generation_size,
+                callback,
+                chunk_summary_post_processing=chunk_summary_post_processing,
+                summary_mode=summary_mode)
+            tk = self.model.tokenize(text)
+            dtk_ln=prev_len-len(tk)
+            prev_len = len(tk)
+            self.step(f"Current text size : {prev_len}, max summary size : {max_summary_size}")
+            self.step_end(f"Compressing {doc_name}...")
+            if dtk_ln<=10: # it is no longer summarizing
+                break
+        return text
+
+    def smart_data_extraction(
+        self,
+        text,
+        data_extraction_instruction="Summarize the current chunk.",
+        final_task_instruction="reformulate with better wording",
+        doc_name="chunk",
+        answer_start="",
+        max_generation_size=3000,
+        max_summary_size=512,
+        callback=None,
+        chunk_summary_post_processing=None,
+        summary_mode=SUMMARY_MODE.SUMMARY_MODE_SEQUENCIAL
+    ):
+        tk = self.model.tokenize(text)
+        prev_len = len(tk)
+        while len(tk)>max_summary_size:
+            chunk_size = int(self.config.ctx_size*0.6)
+            document_chunks = DocumentDecomposer.decompose_document(text, chunk_size, 0, self.model.tokenize, self.model.detokenize, True)
+            text = self.summerize_chunks(
+                document_chunks,
+                data_extraction_instruction,
+                doc_name,
+                answer_start,
+                max_generation_size,
+                callback,
+                chunk_summary_post_processing=chunk_summary_post_processing,
+                summary_mode=summary_mode
+            )
+            tk = self.model.tokenize(text)
+            dtk_ln=prev_len-len(tk)
+            prev_len = len(tk)
+            self.step(f"Current text size : {prev_len}, max summary size : {max_summary_size}")
+            if dtk_ln<=10: # it is no longer summarizing
+                break
+        self.step_start(f"Rewriting ...")
+        text = self.summerize_chunks(
+            [text],
+            final_task_instruction,
+            doc_name, answer_start,
+            max_generation_size,
+            callback,
+            chunk_summary_post_processing=chunk_summary_post_processing
+        )
+        self.step_end(f"Rewriting ...")
+
+        return text
+
+    def summerize_chunks(
+        self,
+        chunks,
+        summary_instruction="Summarize the current chunk.",
+        doc_name="chunk",
+        answer_start="",
+        max_generation_size=3000,
+        callback=None,
+        chunk_summary_post_processing=None,
+        summary_mode=SUMMARY_MODE.SUMMARY_MODE_SEQUENCIAL
+    ):
+        start_header_id_template = self.config.start_header_id_template
+        end_header_id_template = self.config.end_header_id_template
+        system_message_template = self.config.system_message_template
+
+        if summary_mode==SUMMARY_MODE.SUMMARY_MODE_SEQUENCIAL:
+            summary = ""
+            for i, chunk in enumerate(chunks):
+                self.step_start(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
+                summary = f"{answer_start}"+ self.fast_gen(
+                    "\n".join([
+                        self.system_custom_header("previous chunks analysis"),
+                        f"{summary}",
+                        self.system_custom_header("current chunk"),
+                        f"{chunk}",
+                        self.system_full_header,
+                        summary_instruction,
+                        "Keep only information relevant to the context.",
+                        "The output must keep the information from the previous chunks analysis and add the information extracted from the current chunk.",
+                        "Be precise and do not invent information that does not exist in the previous chunks analysis or the current chunk.",
+                        "Do not add any extra comments.",
+                        self.system_custom_header("cumulative chunks analysis")+answer_start
+                    ]),
+                    max_generation_size=max_generation_size,
+                    callback=callback)
+                if chunk_summary_post_processing:
+                    summary = chunk_summary_post_processing(summary)
+                self.step_end(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
+            return summary
+        else:
+            summeries = []
+            for i, chunk in enumerate(chunks):
+                self.step_start(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
+                summary = f"{answer_start}"+ self.fast_gen(
+                    "\n".join([
+                        f"{start_header_id_template}Document_chunk [{doc_name}]{end_header_id_template}",
+                        f"{chunk}",
+                        f"{start_header_id_template}{system_message_template}{end_header_id_template}{summary_instruction}",
+                        "Answer directly with the summary with no extra comments.",
+                        f"{start_header_id_template}summary{end_header_id_template}",
+                        f"{answer_start}"
+                    ]),
+                    max_generation_size=max_generation_size,
+                    callback=callback)
+                if chunk_summary_post_processing:
+                    summary = chunk_summary_post_processing(summary)
+                summeries.append(summary)
+                self.step_end(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
+            return "\n".join(summeries)
+
+    def sequencial_chunks_summary(
+        self,
+        chunks,
+        summary_instruction="summarize",
+        doc_name="chunk",
+        answer_start="",
+        max_generation_size=3000,
+        callback=None,
+        chunk_summary_post_processing=None
+    ):
+        start_header_id_template = self.config.start_header_id_template
+        end_header_id_template = self.config.end_header_id_template
+        system_message_template = self.config.system_message_template
+        summeries = []
+        for i, chunk in enumerate(chunks):
+            if i<len(chunks)-1:
+                chunk1 = chunks[i+1]
+            else:
+                chunk1=""
+            if i>0:
+                chunk=summary
+            self.step_start(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
+            summary = f"{answer_start}"+ self.fast_gen(
+                "\n".join([
+                    f"{start_header_id_template}Document_chunk: {doc_name}{end_header_id_template}",
+                    "Block1:",
+                    f"{chunk}",
+                    "Block2:",
+                    f"{chunk1}",
+                    f"{start_header_id_template}{system_message_template}{end_header_id_template}{summary_instruction}",
+                    "Answer directly with the summary with no extra comments.",
+                    f"{start_header_id_template}summary{end_header_id_template}",
+                    f"{answer_start}"
+                ]),
+                max_generation_size=max_generation_size,
+                callback=callback)
+            if chunk_summary_post_processing:
+                summary = chunk_summary_post_processing(summary)
+            summeries.append(summary)
+            self.step_end(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
+        return "\n".join(summeries)
+
 
 class StateMachine:
     def __init__(self, states_list):
         """
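Note: summerize_text keeps re-chunking and re-summarizing until the text fits within max_summary_size tokens, and the dtk_ln guard aborts once a pass shrinks the text by 10 tokens or fewer, so a model that stops compressing cannot loop forever. smart_data_extraction reuses the same guard and adds a final rewrite pass. The convergence loop in isolation:

    # Convergence guard shared by summerize_text / smart_data_extraction (sketch).
    def compress_until_fits(text, tokenize, one_pass, max_tokens, min_gain=10):
        prev_len = len(tokenize(text))
        while prev_len > max_tokens:
            text = one_pass(text)                 # chunk + summarize one round
            cur_len = len(tokenize(text))
            if prev_len - cur_len <= min_gain:    # no longer compressing: bail out
                break
            prev_len = cur_len
        return text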
@@ -2350,17 +2636,17 @@ class APScript(StateMachine):
                 self.step_start(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
                 summary = f"{answer_start}"+ self.fast_gen(
                     "\n".join([
-                        f"{start_header_id_template}Previous_chunks_summary{end_header_id_template}",
+                        self.system_custom_header("previous chunks analysis"),
                         f"{summary}",
-                        f"{start_header_id_template}Current_chunk{end_header_id_template}",
+                        self.system_custom_header("current chunk"),
                         f"{chunk}",
-                        f"{start_header_id_template}{system_message_template}{end_header_id_template}{summary_instruction}",
-                        f"Summerize the current chunk and fuse it with previous chunk summary ion order to keep the required informations.",
-                        f"The summary needs to keep all relevant information.",
-                        f"Be precise and do not invent information that does not exist in the previous summary or the current chunk.",
-                        f"Answer directly with the summary with no extra comments.",
-                        f"{start_header_id_template}summary{end_header_id_template}",
-                        f"{answer_start}"
+                        self.system_full_header,
+                        summary_instruction,
+                        "Keep only information relevant to the context.",
+                        "The output must keep the information from the previous chunks analysis and add the information extracted from the current chunk.",
+                        "Be precise and do not invent information that does not exist in the previous chunks analysis or the current chunk.",
+                        "Do not add any extra comments.",
+                        self.system_custom_header("cumulative chunks analysis")+answer_start
                     ]),
                     max_generation_size=max_generation_size,
                     callback=callback)
@@ -3809,7 +4095,7 @@ fetch('/open_file', {
     def select_model(self, binding_name, model_name):
         self.personality.app.select_model(binding_name, model_name)
 
     def verify_rag_entry(self, query, rag_entry):
-        return self.yes_no("Does the text entry contain the answer to the query?", self.system_custom_header("Query")+query+"\n"+self.system_custom_header("text entry")+":\n"+rag_entry)
+        return self.yes_no("Is there any useful information in the document chunk that can be used to answer the query?", self.app.system_custom_header("Query")+query+"\n"+self.app.system_custom_header("document chunk")+"\n"+rag_entry)
 
     # Properties ===============================================
     @property
     def start_header_id_template(self) -> str:
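Note: the reworded question turns relevance checking into a single boolean LLM call, and routing the headers through self.app.system_custom_header keeps APScript consistent with the new AIPersonality helpers. Sketch of the gate, where yes_no is the assumed boolean-LLM helper:

    # Relevance gate behind verify_rag_entry (sketch; yes_no returns a bool).
    def is_relevant(yes_no, custom_header, query, chunk_text):
        return yes_no(
            "Is there any useful information in the document chunk that can be used to answer the query?",
            custom_header("Query") + query + "\n" + custom_header("document chunk") + "\n" + chunk_text,
        )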