Mirror of https://github.com/ParisNeo/lollms.git (synced 2024-12-22 22:12:27 +00:00)
upgraded to new templating system
This commit is contained in: parent 145267a8e1, commit 2dde56275a
Changed files:
.github/workflows
configs
lollms
    app.py, com.py, tasks.py, tti.py
    configs
    databases
    functions
    personality.py
    server
    services
        dalle, diffusers, midjourney, sd, xtts
.github/workflows/build-publish.yaml (vendored)
@@ -19,8 +19,7 @@ jobs:
 cmake-version: 3.x # Specify your desired CMake version

 - name: Build and Test (Linux)
-run: |
-mkdir build
+run: | mkdir build
 cd build
 cmake ..
 make
@@ -46,8 +45,7 @@ jobs:
 cmake-version: 3.0 # cmake version

 - name: Build and Test (Windows)
-run: |
-mkdir build
+run: | mkdir build
 cd build
 cmake ..
 cmake --build . --config Release
@@ -1,5 +1,5 @@
 # =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
-version: 104
+version: 106
 binding_name: null
 model_name: null
 model_variant: null
@@ -29,6 +29,11 @@ app_custom_logo: ""

 # Genreration parameters
 discussion_prompt_separator: "!@>"
+start_header_id_template: "!@>"
+end_header_id_template: ": "
+separator_template: "\n"
+system_message_template: "system"

 seed: -1
 ctx_size: 4084
 max_n_predict: 4096
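Note: a minimal sketch of how these new configuration fields are meant to compose a message header, replacing the hardcoded "!@>" markers changed throughout the diffs below. The build_header helper and the sample strings are illustrative only, not functions from the lollms codebase.

```python
# Illustrative sketch only: shows how the new config fields compose a message
# header, replacing the previously hardcoded "!@>" markers. build_header is a
# hypothetical helper, not a function from the lollms codebase.
start_header_id_template = "!@>"    # config: start_header_id_template
end_header_id_template = ": "       # config: end_header_id_template
separator_template = "\n"           # config: separator_template
system_message_template = "system"  # config: system_message_template

def build_header(role: str) -> str:
    """Return a header such as '!@>system: ' built from the configured templates."""
    return f"{start_header_id_template}{role}{end_header_id_template}"

# Example: the kind of prompt the updated code assembles from these fields.
prompt = separator_template.join([
    f"{build_header(system_message_template)}Generate a concise and descriptive title for the following content.",
    f"{build_header('content')}Some content to summarize.",
    build_header("title"),
])
print(prompt)
```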
@@ -182,6 +182,13 @@ class LollmsApplication(LoLLMsCom):
 return False

 def add_discussion_to_skills_library(self, client: Client):
+discussion_prompt_separator = self.config.discussion_prompt_separator
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+separator_template = self.config.separator_template
+system_message_template = self.config.system_message_template


 messages = client.discussion.get_messages()

 # Extract relevant information from messages
@@ -194,20 +201,20 @@ class LollmsApplication(LoLLMsCom):
 self.tasks_library.callback = bk_cb

 # Generate title
-title_prompt = "\n".join([
-f"!@>system:Generate a concise and descriptive title for the following content.",
+title_prompt = f"{separator_template}".join([
+f"{start_header_id_template}{system_message_template}{end_header_id_template}Generate a concise and descriptive title for the following content.",
 "The title should summarize the main topic or subject of the content.",
 "Do not mention the format of the content (e.g., bullet points, discussion, etc.) in the title.",
 "Provide only the title without any additional explanations or context.",
-"!@>content:",
+f"{start_header_id_template}content{end_header_id_template}",
 f"{content}",
-"!@>title:"
+f"{start_header_id_template}title{end_header_id_template}"
 ])

 title = self._generate_text(title_prompt)

 # Determine category
-category_prompt = f"!@>system:Analyze the following title, and determine the most appropriate generic category that encompasses the main subject or theme. The category should be broad enough to include multiple related skill entries. Provide only the category name without any additional explanations or context:\n\nTitle:\n{title}\n\n!@>Category:\n"
+category_prompt = f"{start_header_id_template}{system_message_template}{end_header_id_template}Analyze the following title, and determine the most appropriate generic category that encompasses the main subject or theme. The category should be broad enough to include multiple related skill entries. Provide only the category name without any additional explanations or context:\n\nTitle:\n{title}\n{separator_template}{start_header_id_template}Category:\n"
 category = self._generate_text(category_prompt)

 # Add entry to skills library
@@ -763,6 +770,14 @@ class LollmsApplication(LoLLMsCom):
 Returns:
 Tuple[str, str, List[str]]: The prepared query, original message content, and tokenized query.
 """
+discussion_prompt_separator = self.config.discussion_prompt_separator
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+separator_template = self.config.separator_template
+system_message_template = self.config.system_message_template


 if self.personality.callback is None:
 self.personality.callback = partial(self.process_chunk, client_id=client_id)
 # Get the list of messages
@@ -802,7 +817,7 @@ class LollmsApplication(LoLLMsCom):
 conditionning = self.personality._personality_conditioning

 if len(conditionning)>0:
-conditionning = self.personality.replace_keys(conditionning, self.personality.conditionning_commands) +"" if conditionning[-1]=="\n" else "\n"
+conditionning = start_header_id_template + system_message_template + end_header_id_template + self.personality.replace_keys(conditionning, self.personality.conditionning_commands) + ("" if conditionning[-1]==separator_template else separator_template)

 # Check if there are document files to add to the prompt
 internet_search_results = ""
@@ -814,21 +829,21 @@ class LollmsApplication(LoLLMsCom):

 # boosting information
 if self.config.positive_boost:
-positive_boost="\n!@>important information: "+self.config.positive_boost+"\n"
+positive_boost=f"{separator_template}{start_header_id_template}important information: "+self.config.positive_boost+"\n"
 n_positive_boost = len(self.model.tokenize(positive_boost))
 else:
 positive_boost=""
 n_positive_boost = 0

 if self.config.negative_boost:
-negative_boost="\n!@>important information: "+self.config.negative_boost+"\n"
+negative_boost=f"{separator_template}{start_header_id_template}important information: "+self.config.negative_boost+"\n"
 n_negative_boost = len(self.model.tokenize(negative_boost))
 else:
 negative_boost=""
 n_negative_boost = 0

 if self.config.fun_mode:
-fun_mode="\n!@>important information: Fun mode activated. In this mode you must answer in a funny playful way. Do not be serious in your answers. Each answer needs to make the user laugh.\n"
+fun_mode=f"{separator_template}{start_header_id_template}important information: Fun mode activated. In this mode you must answer in a funny playful way. Do not be serious in your answers. Each answer needs to make the user laugh.\n"
 n_fun_mode = len(self.model.tokenize(positive_boost))
 else:
 fun_mode=""
@@ -842,14 +857,14 @@ class LollmsApplication(LoLLMsCom):
 discussion = self.recover_discussion(client_id)
 if self.config.internet_activate_search_decision:
 self.personality.step_start(f"Requesting if {self.personality.name} needs to search internet to answer the user")
-need = not self.personality.yes_no(f"!@>system: Answer the question with yes or no. Don't add any extra explanation.\n!@>user: Do you have enough information to give a satisfactory answer to {self.config.user_name}'s request without internet search? (If you do not know or you can't answer 0 (no)", discussion)
+need = not self.personality.yes_no(f"{start_header_id_template}{system_message_template}{end_header_id_template}Answer the question with yes or no. Don't add any extra explanation.{separator_template}{start_header_id_template}user: Do you have enough information to give a satisfactory answer to {self.config.user_name}'s request without internet search? (If you do not know or you can't answer 0 (no)", discussion)
 self.personality.step_end(f"Requesting if {self.personality.name} needs to search internet to answer the user")
 self.personality.step("Yes" if need else "No")
 else:
 need=True
 if need:
 self.personality.step_start("Crafting internet search query")
-query = self.personality.fast_gen(f"!@>discussion:\n{discussion[-2048:]}\n!@>system: Read the discussion and craft a web search query suited to recover needed information to reply to last {self.config.user_name} message.\nDo not answer the prompt. Do not add explanations.\n!@>current date: {datetime.now()}\n!@>websearch query: ", max_generation_size=256, show_progress=True, callback=self.personality.sink)
+query = self.personality.fast_gen(f"{start_header_id_template}discussion{end_header_id_template}\n{discussion[-2048:]}{separator_template}{start_header_id_template}system: Read the discussion and craft a web search query suited to recover needed information to reply to last {self.config.user_name} message.\nDo not answer the prompt. Do not add explanations.{separator_template}{start_header_id_template}current date: {datetime.now()}{separator_template}{start_header_id_template}websearch query: ", max_generation_size=256, show_progress=True, callback=self.personality.sink)
 self.personality.step_end("Crafting internet search query")
 self.personality.step(f"web search query: {query}")

@@ -858,14 +873,14 @@ class LollmsApplication(LoLLMsCom):
 else:
 self.personality.step_start("Performing Internet search (advanced mode: slower but more advanced)")

-internet_search_results=f"!@>instructions: Use the web search results data to answer {self.config.user_name}. Try to extract information from the web search and use it to perform the requested task or answer the question. Do not come up with information that is not in the websearch results. Try to stick to the websearch results and clarify if your answer was based on the resuts or on your own culture. If you don't know how to perform the task, then tell the user politely that you need more data inputs.\n!@>Web search results:\n"
+internet_search_results=f"{start_header_id_template}instructions{end_header_id_template}Use the web search results data to answer {self.config.user_name}. Try to extract information from the web search and use it to perform the requested task or answer the question. Do not come up with information that is not in the websearch results. Try to stick to the websearch results and clarify if your answer was based on the resuts or on your own culture. If you don't know how to perform the task, then tell the user politely that you need more data inputs.{separator_template}{start_header_id_template}Web search results:\n"

 docs, sorted_similarities, document_ids = self.personality.internet_search_with_vectorization(query, self.config.internet_quick_search)

 if len(docs)>0:
 for doc, infos,document_id in zip(docs, sorted_similarities, document_ids):
 internet_search_infos.append(document_id)
-internet_search_results += f"!@>search result chunk:\nchunk_infos:{document_id['url']}\nchunk_title:{document_id['title']}\ncontent:{doc}\n"
+internet_search_results += f"{start_header_id_template}search result chunk{end_header_id_template}\nchunk_infos:{document_id['url']}\nchunk_title:{document_id['title']}\ncontent:{doc}\n"
 else:
 internet_search_results += "The search response was empty!\nFailed to recover useful information from the search engine.\n"
 if self.config.internet_quick_search:
@@ -875,12 +890,12 @@ class LollmsApplication(LoLLMsCom):

 if self.personality.persona_data_vectorizer:
 if documentation=="":
-documentation="\n!@>Documentation:\n"
+documentation=f"{separator_template}{start_header_id_template}Documentation:\n"

 if self.config.data_vectorization_build_keys_words:
 if discussion is None:
 discussion = self.recover_discussion(client_id)
-query = self.personality.fast_gen(f"\n!@>instruction: Read the discussion and rewrite the last prompt for someone who didn't read the entire discussion.\nDo not answer the prompt. Do not add explanations.\n!@>discussion:\n{discussion[-2048:]}\n!@>enhanced query: ", max_generation_size=256, show_progress=True)
+query = self.personality.fast_gen(f"{separator_template}{start_header_id_template}instruction: Read the discussion and rewrite the last prompt for someone who didn't read the entire discussion.\nDo not answer the prompt. Do not add explanations.{separator_template}{start_header_id_template}discussion:\n{discussion[-2048:]}{separator_template}{start_header_id_template}enhanced query: ", max_generation_size=256, show_progress=True)
 ASCIIColors.cyan(f"Query:{query}")
 else:
 query = current_message.content
@@ -888,9 +903,9 @@ class LollmsApplication(LoLLMsCom):
 docs, sorted_similarities, document_ids = self.personality.persona_data_vectorizer.recover_text(query, top_k=self.config.data_vectorization_nb_chunks)
 for doc, infos, doc_id in zip(docs, sorted_similarities, document_ids):
 if self.config.data_vectorization_put_chunk_informations_into_context:
-documentation += f"!@>document chunk:\nchunk_infos:{infos}\ncontent:{doc}\n"
+documentation += f"{start_header_id_template}document chunk{end_header_id_template}\nchunk_infos:{infos}\ncontent:{doc}\n"
 else:
-documentation += f"!@>chunk:\n{doc}\n"
+documentation += f"{start_header_id_template}chunk{end_header_id_template}\n{doc}\n"

 except Exception as ex:
 trace_exception(ex)
@@ -901,11 +916,11 @@ class LollmsApplication(LoLLMsCom):
 discussion = self.recover_discussion(client_id)

 if documentation=="":
-documentation="\n!@>important information: Use the documentation data to answer the user questions. If the data is not present in the documentation, please tell the user that the information he is asking for does not exist in the documentation section. It is strictly forbidden to give the user an answer without having actual proof from the documentation.\n!@>Documentation:\n"
+documentation=f"{separator_template}{start_header_id_template}important information: Use the documentation data to answer the user questions. If the data is not present in the documentation, please tell the user that the information he is asking for does not exist in the documentation section. It is strictly forbidden to give the user an answer without having actual proof from the documentation.{separator_template}{start_header_id_template}Documentation:\n"

 if self.config.data_vectorization_build_keys_words:
 self.personality.step_start("Building vector store query")
-query = self.personality.fast_gen(f"\n!@>instruction: Read the discussion and rewrite the last prompt for someone who didn't read the entire discussion.\nDo not answer the prompt. Do not add explanations.\n!@>discussion:\n{discussion[-2048:]}\n!@>enhanced query: ", max_generation_size=256, show_progress=True, callback=self.personality.sink)
+query = self.personality.fast_gen(f"{separator_template}{start_header_id_template}instruction: Read the discussion and rewrite the last prompt for someone who didn't read the entire discussion.\nDo not answer the prompt. Do not add explanations.{separator_template}{start_header_id_template}discussion:\n{discussion[-2048:]}{separator_template}{start_header_id_template}enhanced query: ", max_generation_size=256, show_progress=True, callback=self.personality.sink)
 self.personality.step_end("Building vector store query")
 ASCIIColors.cyan(f"Query: {query}")
 else:
@@ -919,20 +934,20 @@ class LollmsApplication(LoLLMsCom):
 content = client.discussion.vectorizer.chunks[doc_index]['chunk_text']

 if self.config.data_vectorization_put_chunk_informations_into_context:
-documentation += f"!@>document chunk:\nchunk_infos:{doc_id}\ncontent:{content}\n"
+documentation += f"{start_header_id_template}document chunk{end_header_id_template}\nchunk_infos:{doc_id}\ncontent:{content}\n"
 else:
-documentation += f"!@>chunk:\n{content}\n"
+documentation += f"{start_header_id_template}chunk{end_header_id_template}\n{content}\n"

 docs, sorted_similarities, document_ids = client.discussion.vectorizer.recover_text(query, top_k=self.config.data_vectorization_nb_chunks)
 for doc, infos in zip(docs, sorted_similarities):
 if self.config.data_vectorization_force_first_chunk and len(client.discussion.vectorizer.chunks)>0 and infos[0]==doc_id:
 continue
 if self.config.data_vectorization_put_chunk_informations_into_context:
-documentation += f"!@>document chunk:\nchunk path: {infos[0]}\nchunk content:\n{doc}\n"
+documentation += f"{start_header_id_template}document chunk{end_header_id_template}\nchunk path: {infos[0]}\nchunk content:\n{doc}\n"
 else:
-documentation += f"!@>chunk:\n{doc}\n"
+documentation += f"{start_header_id_template}chunk{end_header_id_template}\n{doc}\n"

-documentation += "\n!@>important information: Use the documentation data to answer the user questions. If the data is not present in the documentation, please tell the user that the information he is asking for does not exist in the documentation section. It is strictly forbidden to give the user an answer without having actual proof from the documentation.\n"
+documentation += f"{separator_template}{start_header_id_template}important information: Use the documentation data to answer the user questions. If the data is not present in the documentation, please tell the user that the information he is asking for does not exist in the documentation section. It is strictly forbidden to give the user an answer without having actual proof from the documentation.\n"
 except Exception as ex:
 trace_exception(ex)
 self.warning("Couldn't add documentation to the context. Please verify the vector database")
@@ -943,7 +958,7 @@ class LollmsApplication(LoLLMsCom):
 if discussion is None:
 discussion = self.recover_discussion(client_id)
 self.personality.step_start("Building query")
-query = self.personality.fast_gen(f"!@>Your task is to carefully read the provided discussion and reformulate {self.config.user_name}'s request concisely. Return only the reformulated request without any additional explanations, commentary, or output.\n!@>discussion:\n{discussion[-2048:]}\n!@>search query: ", max_generation_size=256, show_progress=True, callback=self.personality.sink)
+query = self.personality.fast_gen(f"{start_header_id_template}{system_message_template}{end_header_id_template}Your task is to carefully read the provided discussion and reformulate {self.config.user_name}'s request concisely. Return only the reformulated request without any additional explanations, commentary, or output.{separator_template}{start_header_id_template}discussion:\n{discussion[-2048:]}{separator_template}{start_header_id_template}search query: ", max_generation_size=256, show_progress=True, callback=self.personality.sink)
 self.personality.step_end("Building query")
 # skills = self.skills_library.query_entry(query)
 self.personality.step_start("Adding skills")
@@ -953,9 +968,9 @@ class LollmsApplication(LoLLMsCom):
 knowledge_infos={"titles":skill_titles,"contents":skills}
 if len(skills)>0:
 if knowledge=="":
-knowledge=f"!@>knowledge:\n"
+knowledge=f"{start_header_id_template}knowledge{end_header_id_template}\n"
 for i,(title, content) in enumerate(zip(skill_titles,skills)):
-knowledge += f"!@>knowledge {i}:\ntitle:\n{title}\ncontent:\n{content}\n"
+knowledge += f"{start_header_id_template}knowledge {i}{end_header_id_template}\ntitle:\n{title}\ncontent:\n{content}\n"
 self.personality.step_end("Adding skills")
 self.personality.step_end("Querying skills library")
 except Exception as ex:
@@ -965,7 +980,7 @@ class LollmsApplication(LoLLMsCom):
 self.personality.step_end("Querying skills library",False)
 user_description=""
 if self.config.use_user_informations_in_discussion:
-user_description="!@>User description:\n"+self.config.user_description+"\n"
+user_description=f"{start_header_id_template}User description{end_header_id_template}\n"+self.config.user_description+"\n"


 # Tokenize the conditionning text and calculate its number of tokens
@@ -1121,9 +1136,9 @@ class LollmsApplication(LoLLMsCom):
 ASCIIColors.bold("HISTORY")
 ASCIIColors.yellow(knowledge)
 ASCIIColors.bold("DISCUSSION")
-ASCIIColors.hilight(discussion_messages,"!@>",ASCIIColors.color_yellow,ASCIIColors.color_bright_red,False)
+ASCIIColors.hilight(discussion_messages,f"{start_header_id_template}",ASCIIColors.color_yellow,ASCIIColors.color_bright_red,False)
 ASCIIColors.bold("Final prompt")
-ASCIIColors.hilight(prompt_data,"!@>",ASCIIColors.color_yellow,ASCIIColors.color_bright_red,False)
+ASCIIColors.hilight(prompt_data,f"{start_header_id_template}",ASCIIColors.color_yellow,ASCIIColors.color_bright_red,False)
 ASCIIColors.info(f"prompt size:{len(tokens)} tokens")
 ASCIIColors.info(f"available space after doc and knowledge:{available_space} tokens")
@@ -41,6 +41,7 @@ class LoLLMsCom:
 def __init__(self, sio:socketio.AsyncServer=None, verbose:bool=False) -> None:
 self.sio= sio
 self.verbose = verbose
+self.config = None
 self.tti = None
 self.tts = None
 self.stt = None
@@ -1,5 +1,5 @@
 # =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
-version: 104
+version: 106
 binding_name: null
 model_name: null
 model_variant: null
@@ -29,6 +29,11 @@ app_custom_logo: ""

 # Genreration parameters
 discussion_prompt_separator: "!@>"
+start_header_id_template: "!@>"
+end_header_id_template: ": "
+separator_template: "\n"
+system_message_template: "system"

 seed: -1
 ctx_size: 4084
 max_n_predict: 4096
@@ -1030,10 +1030,12 @@ class Discussion:
 messages += f'{sender}: {content}\n'
 return title, messages

-def format_discussion(self, max_allowed_tokens, splitter_text="!@>"):
+def format_discussion(self, max_allowed_tokens, splitter_text=None):
+if not splitter_text:
+splitter_text = self.lollms.config.discussion_prompt_separator
 formatted_text = ""
 for message in reversed(self.messages): # Start from the newest message
-formatted_message = f"{splitter_text}{message.sender.replace(':','').replace('!@>','')}:\n{message.content}\n"
+formatted_message = f"{splitter_text}{message.sender.replace(':','').replace(splitter_text,'')}:\n{message.content}\n"
 tokenized_message = self.lollms.model.tokenize(formatted_message)
 if len(tokenized_message) + len(self.lollms.model.tokenize(formatted_text)) <= max_allowed_tokens:
 formatted_text = formatted_message + formatted_text
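Note: a small illustrative sketch of the new default-splitter behaviour in format_discussion. The FakeConfig class and format_message function below are stand-ins, not the real lollms objects; they only show that, when no splitter is passed, the configured discussion_prompt_separator is used instead of the previously hardcoded "!@>".

```python
from typing import Optional

# Stand-in for the lollms config; only the field used here is modelled.
class FakeConfig:
    discussion_prompt_separator = "!@>"

def format_message(sender: str, content: str,
                   splitter_text: Optional[str], config: FakeConfig) -> str:
    # Fall back to the configured separator when no splitter is given,
    # mirroring the change to format_discussion above.
    if not splitter_text:
        splitter_text = config.discussion_prompt_separator
    clean_sender = sender.replace(":", "").replace(splitter_text, "")
    return f"{splitter_text}{clean_sender}:\n{content}\n"

print(format_message("user", "Hello", None, FakeConfig()))  # uses "!@>"
print(format_message("user", "Hello", "##", FakeConfig()))  # explicit splitter wins
```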
@@ -8,7 +8,7 @@ from pathlib import Path
 from functools import partial

 # It is advised to import typing elements
-from typing import List, Optional, Any
+from typing import List, Optional, Any, Tuple, Dict

 # Import PackageManager if there are potential libraries that need to be installed
 from lollms.utilities import PackageManager, find_first_available_file_index, discussion_path_to_url
@@ -102,7 +102,6 @@ def arxiv_pdf_search(query: str, max_results: Optional[int] = 5, sort_by: Option
 <p><a href="{pdf_url}" target="_blank">PDF Link</a></p>
 <p><a href="{local_url}" target="_blank">Local PDF</a></p>
 </div>
 <hr />
 """
 # Append to report content
 report_content += f"""
@@ -120,11 +120,15 @@ class AIPersonality:
 Raises:
 ValueError: If the provided path is not a folder or does not contain a config.yaml file.
 """
+self.config = config
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+system_message_template = self.config.system_message_template

 self.bot_says = ""

 self.lollms_paths = lollms_paths
 self.model = model
 self.config = config
 self.callback = callback
 self.app = app
@@ -157,10 +161,11 @@ class AIPersonality:

 self._languages: List[dict]=[]


 # Conditionning
 self._personality_description: str = "This personality is a helpful and Kind AI ready to help you solve your problems"
 self._personality_conditioning: str = "\n".join([
 "!@>system:",
 "lollms (Lord of LLMs) is a smart and helpful Assistant built by the computer geek ParisNeo.",
 "It is compatible with many bindings to LLM models such as llama, gpt4all, gptj, autogptq etc.",
 "It can discuss with humans and assist them on many subjects.",
@@ -172,10 +177,9 @@ class AIPersonality:
 ])
 self._welcome_message: str = "Welcome! I am lollms (Lord of LLMs) A free and open assistant built by ParisNeo. What can I do for you today?"
 self._include_welcome_message_in_discussion: bool = True
-self._user_message_prefix: str = "!@>human: "
+self._user_message_prefix: str = f"human:"
 self._link_text: str = "\n"
-self._ai_message_prefix: str = "!@>lollms:"
-self._anti_prompts:list = [self.config.discussion_prompt_separator]
+self._ai_message_prefix: str = f"lollms:"

 # Extra
 self._dependencies: List[str] = []
@@ -187,7 +191,6 @@ class AIPersonality:

 # Default model parameters
 self._model_temperature: float = 0.1 # higher: more creative, lower more deterministic
 self._model_n_predicts: int = 2048 # higher: generates many words, lower generates
 self._model_top_k: int = 50
 self._model_top_p: float = 0.95
 self._model_repeat_penalty: float = 1.3
@@ -435,25 +438,29 @@ class AIPersonality:
 Returns:
 int: Index of the selected option within the possible_ansers list. Or -1 if there was not match found among any of them.
 """
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+system_message_template = self.config.system_message_template

 choices = "\n".join([f"{i}. {possible_answer}" for i, possible_answer in enumerate(possible_answers)])
 elements = [conditionning] if conditionning!="" else []
 elements += [
-"!@>instructions:",
+f"{start_header_id_template}{system_message_template}{end_header_id_template}",
 "Answer this multi choices question.",
 "Answer with an id from the possible answers.",
 "Do not answer with an id outside this possible answers.",
 ]
 if context!="":
 elements+=[
-"!@>Context:",
+f"{start_header_id_template}context{end_header_id_template}",
 f"{context}",
 ]
 elements += [
-f"!@>question: {question}",
-"!@>possible answers:",
+f"{start_header_id_template}question{end_header_id_template}{question}",
+f"{start_header_id_template}possible answers{end_header_id_template}",
 f"{choices}",
 ]
-elements += ["!@>answer:"]
+elements += [f"{start_header_id_template}answer{end_header_id_template}"]
 prompt = self.build_prompt(elements)

 gen = self.generate(prompt, max_answer_length, temperature=0.1, top_k=50, top_p=0.9, repeat_penalty=1.0, repeat_last_n=50, callback=self.sink).strip().replace("</s>","").replace("<s>","")
@@ -478,24 +485,28 @@ class AIPersonality:
 Returns:
 int: Index of the selected option within the possible_ansers list. Or -1 if there was not match found among any of them.
 """
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+system_message_template = self.config.system_message_template

 choices = "\n".join([f"{i}. {possible_answer}" for i, possible_answer in enumerate(possible_answers)])
 elements = [conditionning] if conditionning!="" else []
 elements += [
-"!@>instructions:",
+f"{start_header_id_template}{system_message_template}{end_header_id_template}",
 "Answer this multi choices question.",
 "Answer with an id from the possible answers.",
 "Do not answer with an id outside this possible answers.",
-f"!@>question: {question}",
-"!@>possible answers:",
+f"{start_header_id_template}{end_header_id_template}{question}",
+f"{start_header_id_template}possible answers{end_header_id_template}",
 f"{choices}",
 ]
 if context!="":
 elements+=[
-"!@>Context:",
+f"{start_header_id_template}context{end_header_id_template}",
 f"{context}",
 ]

-elements += ["!@>answer:"]
+elements += [f"{start_header_id_template}answer{end_header_id_template}"]
 prompt = self.build_prompt(elements)

 gen = self.generate(prompt, max_answer_length, temperature=0.1, top_k=50, top_p=0.9, repeat_penalty=1.0, repeat_last_n=50).strip().replace("</s>","").replace("<s>","")
@@ -581,8 +592,12 @@ class AIPersonality:
 Returns:
 - str: The generated text after removing special tokens ("<s>" and "</s>") and stripping any leading/trailing whitespace.
 """
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+system_message_template = self.config.system_message_template

 prompt = "\n".join([
-"!@>system: I am an AI assistant that can converse and analyze images. When asked to locate something in an image you send, I will reply with:",
+f"{start_header_id_template}{system_message_template}{end_header_id_template}I am an AI assistant that can converse and analyze images. When asked to locate something in an image you send, I will reply with:",
 "boundingbox(image_index, label, left, top, width, height)",
 "Where:",
 "image_index: 0-based index of the image",
@@ -822,13 +837,11 @@ class AIPersonality:
 self._user_message_prefix = config.get("user_message_prefix", self._user_message_prefix)
 self._link_text = config.get("link_text", self._link_text)
 self._ai_message_prefix = config.get("ai_message_prefix", self._ai_message_prefix)
-self._anti_prompts = [self.config.discussion_prompt_separator]+config.get("anti_prompts", self._anti_prompts)
 self._dependencies = config.get("dependencies", self._dependencies)
 self._disclaimer = config.get("disclaimer", self._disclaimer)
 self._help = config.get("help", self._help)
 self._commands = config.get("commands", self._commands)
 self._model_temperature = config.get("model_temperature", self._model_temperature)
 self._model_n_predicts = config.get("model_n_predicts", self._model_n_predicts)
 self._model_top_k = config.get("model_top_k", self._model_top_k)
 self._model_top_p = config.get("model_top_p", self._model_top_p)
 self._model_repeat_penalty = config.get("model_repeat_penalty", self._model_repeat_penalty)
@@ -889,7 +902,7 @@ class AIPersonality:
 files = [f for f in self.data_path.iterdir() if f.suffix.lower() in ['.asm', '.bat', '.c', '.cpp', '.cs', '.csproj', '.css',
 '.csv', '.docx', '.h', '.hh', '.hpp', '.html', '.inc', '.ini', '.java', '.js', '.json', '.log',
 '.lua', '.map', '.md', '.pas', '.pdf', '.php', '.pptx', '.ps1', '.py', '.rb', '.rtf', '.s', '.se', '.sh', '.sln',
-'.snippet', '.snippets', '.sql', '.sym', '.ts', '.txt', '.xlsx', '.xml', '.yaml', '.yml'] ]
+'.snippet', '.snippets', '.sql', '.sym', '.ts', '.txt', '.xlsx', '.xml', '.yaml', '.yml', '.msg'] ]
 if len(files)>0:
 dl = GenericDataLoader()
 self.persona_data_vectorizer = TextVectorizer(
@@ -1106,13 +1119,11 @@ class AIPersonality:
 "user_message_prefix": self._user_message_prefix,
 "link_text": self._link_text,
 "ai_message_prefix": self._ai_message_prefix,
-"anti_prompts": self._anti_prompts,
 "dependencies": self._dependencies,
 "disclaimer": self._disclaimer,
 "help": self._help,
 "commands": self._commands,
 "model_temperature": self._model_temperature,
 "model_n_predicts": self._model_n_predicts,
 "model_top_k": self._model_top_k,
 "model_top_p": self._model_top_p,
 "model_repeat_penalty": self._model_repeat_penalty,
@@ -1148,13 +1159,11 @@ class AIPersonality:
 "user_message_prefix": self._user_message_prefix,
 "link_text": self._link_text,
 "ai_message_prefix": self._ai_message_prefix,
-"anti_prompts": self._anti_prompts,
 "dependencies": self._dependencies,
 "disclaimer": self._disclaimer,
 "help": self._help,
 "commands": self._commands,
 "model_temperature": self._model_temperature,
 "model_n_predicts": self._model_n_predicts,
 "model_top_k": self._model_top_k,
 "model_top_p": self._model_top_p,
 "model_repeat_penalty": self._model_repeat_penalty,
@@ -1416,27 +1425,6 @@ class AIPersonality:
 """
 self._ai_message_prefix = prefix

-@property
-def anti_prompts(self):
-"""
-Get the anti-prompts list.
-
-Returns:
-list: The anti-prompts list.
-"""
-return self._anti_prompts
-
-@anti_prompts.setter
-def anti_prompts(self, prompts):
-"""
-Set the anti-prompts list.
-
-Args:
-prompts (list): The anti-prompts list to set.
-"""
-self._anti_prompts = prompts
-
 @property
 def dependencies(self) -> List[str]:
 """Getter method for the dependencies attribute.
@@ -1526,20 +1514,6 @@ class AIPersonality:
 """
 self._model_temperature = value

-@property
-def model_n_predicts(self) -> int:
-"""Get the number of predictions the model generates."""
-return self._model_n_predicts
-
-@model_n_predicts.setter
-def model_n_predicts(self, value: int):
-"""Set the number of predictions the model generates.
-
-Args:
-value (int): The new number of predictions value.
-"""
-self._model_n_predicts = value
-
 @property
 def model_top_k(self) -> int:
 """Get the model's top-k value."""
@@ -1652,7 +1626,11 @@ class AIPersonality:
 Returns:
 bool: True if any antiprompt is found in the text (ignoring case), False otherwise.
 """
-for prompt in self.anti_prompts:
+anti_prompts = [self.app.config.discussion_prompt_separator]
+if self.app.config.separator_template!="\n":
+anti_prompts.append(self.app.config.separator_template)
+
+for prompt in anti_prompts:
 if prompt.lower() in text.lower():
 return prompt.lower()
 return None
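Note: a self-contained sketch of the config-driven antiprompt detection above. The default argument values stand in for the configuration; the real method reads them from self.app.config.

```python
# Standalone sketch of the antiprompt detection shown in the hunk above.
# The separator values are hardcoded defaults here for illustration only.
def detect_antiprompt(text: str,
                      discussion_prompt_separator: str = "!@>",
                      separator_template: str = "\n"):
    anti_prompts = [discussion_prompt_separator]
    # A plain newline separator is too common to treat as an antiprompt.
    if separator_template != "\n":
        anti_prompts.append(separator_template)
    for prompt in anti_prompts:
        if prompt.lower() in text.lower():
            return prompt.lower()
    return None

print(detect_antiprompt("Sure thing!@>user"))  # -> "!@>"
print(detect_antiprompt("A normal answer"))    # -> None
```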
@@ -1897,6 +1875,7 @@ class APScript(StateMachine):
 self.notify = personality.app.notify

 self.personality = personality
+self.config = personality.config
 self.personality_config = personality_config
 self.installation_option = personality.installation_option
 self.configuration_file_path = self.personality.lollms_paths.personal_configuration_path/"personalities"/self.personality.personality_folder_name/f"config.yaml"
@@ -1932,7 +1911,7 @@ class APScript(StateMachine):
 """
 triggered when a new conversation is created
 """
-return None
+return welcome_message

 def selected(self):
 """
@@ -2208,16 +2187,20 @@ class APScript(StateMachine):


 def translate(self, text_chunk, output_language="french", max_generation_size=3000):
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+system_message_template = self.config.system_message_template

 translated = self.fast_gen(
 "\n".join([
-f"!@>system:",
+f"{start_header_id_template}{system_message_template}{end_header_id_template}",
 f"Translate the following text to {output_language}.",
 "Be faithful to the original text and do not add or remove any information.",
 "Respond only with the translated text.",
 "Do not add comments or explanations.",
-f"!@>text to translate:",
+f"{start_header_id_template}text to translate{end_header_id_template}",
 f"{text_chunk}",
-f"!@>translation:",
+f"{start_header_id_template}translation{end_header_id_template}",
 ]),
 max_generation_size=max_generation_size, callback=self.sink)
 return translated
@@ -2320,18 +2303,22 @@ class APScript(StateMachine):
 chunk_summary_post_processing=None,
 summary_mode=SUMMARY_MODE.SUMMARY_MODE_SEQUENCIAL
 ):
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+system_message_template = self.config.system_message_template

 if summary_mode==SUMMARY_MODE.SUMMARY_MODE_SEQUENCIAL:
 summary = ""
 for i, chunk in enumerate(chunks):
 self.step_start(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
 summary = f"{answer_start}"+ self.fast_gen(
 "\n".join([
-f"!@>Document_chunk: {doc_name}:",
+f"{start_header_id_template}Document_chunk{end_header_id_template}{doc_name}:",
 f"{summary}",
 f"{chunk}",
-f"!@>instruction: {summary_instruction}",
+f"{start_header_id_template}{system_message_template}{end_header_id_template}{summary_instruction}",
 f"Answer directly with the summary with no extra comments.",
-f"!@>summary:",
+f"{start_header_id_template}summary{end_header_id_template}",
 f"{answer_start}"
 ]),
 max_generation_size=max_generation_size,
@@ -2346,11 +2333,11 @@ class APScript(StateMachine):
 self.step_start(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
 summary = f"{answer_start}"+ self.fast_gen(
 "\n".join([
-f"!@>Document_chunk [{doc_name}]:",
+f"{start_header_id_template}Document_chunk [{doc_name}]{end_header_id_template}",
 f"{chunk}",
-f"!@>instruction: {summary_instruction}",
+f"{start_header_id_template}{system_message_template}{end_header_id_template}{summary_instruction}",
 f"Answer directly with the summary with no extra comments.",
-f"!@>summary:",
+f"{start_header_id_template}summary{end_header_id_template}",
 f"{answer_start}"
 ]),
 max_generation_size=max_generation_size,
@@ -2371,6 +2358,9 @@ class APScript(StateMachine):
 callback=None,
 chunk_summary_post_processing=None
 ):
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+system_message_template = self.config.system_message_template
 summeries = []
 for i, chunk in enumerate(chunks):
 if i<len(chunks)-1:
@@ -2382,14 +2372,14 @@ class APScript(StateMachine):
 self.step_start(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
 summary = f"{answer_start}"+ self.fast_gen(
 "\n".join([
-f"!@>Document_chunk: {doc_name}:",
+f"{start_header_id_template}Document_chunk: {doc_name}{end_header_id_template}",
 f"Block1:",
 f"{chunk}",
 f"Block2:",
 f"{chunk1}",
-f"!@>instruction: {summary_instruction}",
+f"{start_header_id_template}{system_message_template}{end_header_id_template}{summary_instruction}",
 f"Answer directly with the summary with no extra comments.",
-f"!@>summary:",
+f"{start_header_id_template}summary{end_header_id_template}",
 f"{answer_start}"
 ]),
 max_generation_size=max_generation_size,
@@ -2401,18 +2391,21 @@ class APScript(StateMachine):
 return "\n".join(summeries)

 def build_prompt_from_context_details(self, context_details:dict, custom_entries=""):
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+system_message_template = self.config.system_message_template
 return self.build_prompt([
 context_details["conditionning"] if context_details["conditionning"] else "",
-"!@>documentation:\n"+context_details["documentation"] if context_details["documentation"] else "",
-"!@>knowledge:\n"+context_details["knowledge"] if context_details["knowledge"] else "",
+f"{start_header_id_template}documentation{end_header_id_template}\n"+context_details["documentation"] if context_details["documentation"] else "",
+f"{start_header_id_template}knowledge{end_header_id_template}\n"+context_details["knowledge"] if context_details["knowledge"] else "",
 context_details["user_description"] if context_details["user_description"] else "",
-"!@>positive_boost:\n"+context_details["positive_boost"] if context_details["positive_boost"] else "",
-"!@>negative_boost:\n"+context_details["negative_boost"] if context_details["negative_boost"] else "",
-"!@>current_language:\n"+context_details["current_language"] if context_details["current_language"] else "",
-"!@>fun_mode:\n"+context_details["fun_mode"] if context_details["fun_mode"] else "",
-"!@>discussion_window:\n"+context_details["discussion_messages"] if context_details["discussion_messages"] else "",
+f"{start_header_id_template}positive_boost{end_header_id_template}\n"+context_details["positive_boost"] if context_details["positive_boost"] else "",
+f"{start_header_id_template}negative_boost{end_header_id_template}\n"+context_details["negative_boost"] if context_details["negative_boost"] else "",
+f"{start_header_id_template}current_language{end_header_id_template}\n"+context_details["current_language"] if context_details["current_language"] else "",
+f"{start_header_id_template}fun_mode{end_header_id_template}\n"+context_details["fun_mode"] if context_details["fun_mode"] else "",
+f"{start_header_id_template}discussion_window{end_header_id_template}\n"+context_details["discussion_messages"] if context_details["discussion_messages"] else "",
 custom_entries,
-"!@>"+context_details["ai_prefix"].replace("!@>","").replace(":","")+":"
+f"{start_header_id_template}"+context_details["ai_prefix"].replace(f"{start_header_id_template}","").replace(":","")+f"{end_header_id_template}"
 ],
 8)
 def build_prompt(self, prompt_parts:List[str], sacrifice_id:int=-1, context_size:int=None, minimum_spare_context_size:int=None):
@@ -2734,19 +2727,23 @@ class APScript(StateMachine):
 return output.decode("utf8")

 def build_python_code(self, prompt, max_title_length=4096):
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+system_message_template = self.config.system_message_template

 if not PackageManager.check_package_installed("autopep8"):
 PackageManager.install_package("autopep8")
 import autopep8
 global_prompt = "\n".join([
 f"{prompt}",
-"!@>Extra conditions:",
+f"{start_header_id_template}Extra conditions{end_header_id_template}",
 "- The code must be complete, not just snippets, and should be put inside a single python markdown code.",
 "-Preceive each python codeblock with a line using this syntax:",
 "$$file_name|the file path relative to the root folder of the project$$",
 "```python",
 "# Placeholder. Here you need to put the code for the file",
 "```",
-"!@>Code Builder:"
+f"{start_header_id_template}Code Builder{end_header_id_template}"
 ])
 code = self.fast_gen(global_prompt, max_title_length)
 code_blocks = self.extract_code_blocks(code)
@@ -2772,7 +2769,11 @@ class APScript(StateMachine):
 Returns:
 str: The generated title.
 """
-global_prompt = f"!@>instructions: Based on the provided prompt, suggest a concise and relevant title that captures the main topic or theme of the conversation. Only return the suggested title, without any additional text or explanation.\n!@>prompt: {prompt}\n!@>title:"
+start_header_id_template = self.config.start_header_id_template
+separator_template = self.config.separator_template
+end_header_id_template = self.config.end_header_id_template
+system_message_template = self.config.system_message_template
+global_prompt = f"{start_header_id_template}{system_message_template}{end_header_id_template}Based on the provided prompt, suggest a concise and relevant title that captures the main topic or theme of the conversation. Only return the suggested title, without any additional text or explanation.{separator_template}{start_header_id_template}prompt{end_header_id_template}{prompt}{separator_template}{start_header_id_template}title{end_header_id_template}"
 title = self.fast_gen(global_prompt,max_title_length)
 return title

@@ -2788,12 +2789,16 @@ class APScript(StateMachine):
 Returns:
 int: Index of the selected option within the possible_ansers list. Or -1 if there was not match found among any of them.
 """
-template = """!@>instruction:
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+system_message_template = self.config.system_message_template

+template = f"""{start_header_id_template}{system_message_template}{end_header_id_template}
 Act as plan builder, a tool capable of making plans to perform the user requested operation.
 """
 if len(actions_list)>0:
-template +="""The plan builder is an AI that responds in json format. It should plan a succession of actions in order to reach the objective.
-!@>list of action types information:
+template +=f"""The plan builder is an AI that responds in json format. It should plan a succession of actions in order to reach the objective.
+{start_header_id_template}list of action types information{end_header_id_template}
 [
 {{actions_list}}
 ]
@@ -2817,12 +2822,12 @@ The AI should respond in this format using data from actions_list:
 }
 """
 if context!="":
-template += """!@>Context:
+template += f"""{start_header_id_template}context{end_header_id_template}
 {{context}}Ok
 """
-template +="""!@>request: {{request}}
+template +=f"""{start_header_id_template}request{end_header_id_template}{{request}}
 """
-template +="""!@>plan: To acheive the requested objective, this is the list of actions to follow, formatted as requested in json format:\n```json\n"""
+template +=f"""{start_header_id_template}plan{end_header_id_template}To acheive the requested objective, this is the list of actions to follow, formatted as requested in json format:\n```json\n"""
 pr = PromptReshaper(template)
 prompt = pr.build({
 "context":context,
@@ -2851,12 +2856,16 @@ The AI should respond in this format using data from actions_list:
 Returns:
 int: Index of the selected option within the possible_ansers list. Or -1 if there was not match found among any of them.
 """
-template = """!@>instruction:
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+system_message_template = self.config.system_message_template

+template = f"""{start_header_id_template}instruction:
 Act as plan builder, a tool capable of making plans to perform the user requested operation.
 """
 if len(actions_list)>0:
-template +="""The plan builder is an AI that responds in json format. It should plan a succession of actions in order to reach the objective.
-!@>list of action types information:
+template +=f"""The plan builder is an AI that responds in json format. It should plan a succession of actions in order to reach the objective.
+{start_header_id_template}list of action types information{end_header_id_template}
 [
 {{actions_list}}
 ]
@@ -2880,12 +2889,12 @@ The AI should respond in this format using data from actions_list:
 }
 """
 if context!="":
-template += """!@>Context:
+template += f"""{start_header_id_template}context{end_header_id_template}
 {{context}}Ok
 """
-template +="""!@>request: {{request}}
+template +=f"""{start_header_id_template}request{end_header_id_template}{{request}}
 """
-template +="""!@>plan: To acheive the requested objective, this is the list of actions to follow, formatted as requested in json format:\n```json\n"""
+template +=f"""{start_header_id_template}plan{end_header_id_template}To acheive the requested objective, this is the list of actions to follow, formatted as requested in json format:\n```json\n"""
 pr = PromptReshaper(template)
 prompt = pr.build({
 "context":context,
@@ -3012,17 +3021,21 @@ The AI should respond in this format using data from actions_list:


 def build_and_execute_python_code(self,context, instructions, execution_function_signature, extra_imports=""):
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+system_message_template = self.config.system_message_template

 code = "```python\n"+self.fast_gen(
 self.build_prompt([
-"!@>context!:",
+f"{start_header_id_template}context{end_header_id_template}",
 context,
-f"!@>system:",
+f"{start_header_id_template}{system_message_template}{end_header_id_template}",
 f"{instructions}",
 f"Here is the signature of the function:\n{execution_function_signature}",
 "Don't call the function, just write it",
 "Do not provide usage example.",
 "The code must me without comments",
-f"!@>coder: Sure, in the following code, I import the necessary libraries, then define the function as you asked.",
+f"{start_header_id_template}coder{end_header_id_template}Sure, in the following code, I import the necessary libraries, then define the function as you asked.",
 "The function is ready to be used in your code and performs the task as you asked:",
 "```python\n"
 ],2), callback=self.sink)
@@ -3070,15 +3083,19 @@ The AI should respond in this format using data from actions_list:
 Returns:
 int: Index of the selected option within the possible_ansers list. Or -1 if there was not match found among any of them.
 """
+start_header_id_template = self.config.start_header_id_template
+end_header_id_template = self.config.end_header_id_template
+system_message_template = self.config.system_message_template

 choices = "\n".join([f"{i}. {possible_answer}" for i, possible_answer in enumerate(possible_answers)])
 elements = [conditionning] if conditionning!="" else []
 elements += [
-"!@>system:",
+f"{start_header_id_template}{system_message_template}{end_header_id_template}",
 "Answer this multi choices question.",
 ]
 if context!="":
 elements+=[
-"!@>Context:",
+f"{start_header_id_template}Context{end_header_id_template}",
 f"{context}",
 ]
 elements +=[
@@ -3088,11 +3105,11 @@ The AI should respond in this format using data from actions_list:
 "the output should be an integer."
 ]
 elements += [
-f"!@>question: {question}",
-"!@>possible answers:",
+f"{start_header_id_template}question{end_header_id_template}{question}",
+f"{start_header_id_template}possible answers{end_header_id_template}",
 f"{choices}",
 ]
-elements += ["!@>answer:"]
+elements += [f"{start_header_id_template}answer{end_header_id_template}"]
 prompt = self.build_prompt(elements)

 gen = self.generate(prompt, max_answer_length, temperature=0.1, top_k=50, top_p=0.9, repeat_penalty=1.0, repeat_last_n=50, callback=self.sink).strip().replace("</s>","").replace("<s>","")
@ -3120,24 +3137,28 @@ The AI should respond in this format using data from actions_list:
|
||||
Returns:
|
||||
int: Index of the selected option within the possible_ansers list. Or -1 if there was not match found among any of them.
|
||||
"""
|
||||
start_header_id_template = self.config.start_header_id_template
|
||||
end_header_id_template = self.config.end_header_id_template
|
||||
system_message_template = self.config.system_message_template
|
||||
|
||||
choices = "\n".join([f"{i}. {possible_answer}" for i, possible_answer in enumerate(possible_answers)])
|
||||
elements = [conditionning] if conditionning!="" else []
|
||||
elements += [
|
||||
"!@>instructions:",
|
||||
f"{start_header_id_template}instructions{end_header_id_template}",
|
||||
"Answer this multi choices question.",
|
||||
"Answer with an id from the possible answers.",
|
||||
"Do not answer with an id outside this possible answers.",
|
||||
f"!@>question: {question}",
|
||||
"!@>possible answers:",
|
||||
f"{start_header_id_template}question{end_header_id_template}{question}",
|
||||
f"{start_header_id_template}possible answers{end_header_id_template}",
|
||||
f"{choices}",
|
||||
]
|
||||
if context!="":
|
||||
elements+=[
|
||||
"!@>Context:",
|
||||
f"{start_header_id_template}context{end_header_id_template}",
|
||||
f"{context}",
|
||||
]
|
||||
|
||||
elements += ["!@>answer:"]
|
||||
elements += [f"{start_header_id_template}answer{end_header_id_template}"]
|
||||
prompt = self.build_prompt(elements)
|
||||
|
||||
gen = self.generate(prompt, max_answer_length, temperature=0.1, top_k=50, top_p=0.9, repeat_penalty=1.0, repeat_last_n=50).strip().replace("</s>","").replace("<s>","")
@ -3373,24 +3394,28 @@ The AI should respond in this format using data from actions_list:
Returns:
str: The upgraded prompt that includes information about the function calls.
"""
function_descriptions = ["!@>information: If you need to call a function to fulfull the user request, use a function markdown tag with the function call as the following json format:",
start_header_id_template = self.config.start_header_id_template
end_header_id_template = self.config.end_header_id_template
system_message_template = self.config.system_message_template

function_descriptions = [f"{start_header_id_template}{system_message_template}{end_header_id_template}If you need to call a function to fulfull the user request, use a function markdown tag with the function call as the following json format:",
"```function",
"{",
'"function_name":the name of the function to be called,',
'"function_parameters": a list of parameter values',
"}",
"```",
"Only use available functions.",
"You can call multiple functions in one generation.",
"Each function call needs to be in a separate function markdown tag.",
"Do not add status of the execution as it will be added automatically by the system.",
"If you want to get the output of the function before answering the user, then use the keyword @<NEXT>@ at the end of your message.",
"!@>List of possible functions to be called:\n"]
f"{start_header_id_template}Available functions{end_header_id_template}\n"]
for function in functions:
description = f"{function['function_name']}: {function['function_description']}\nparameters:{function['function_parameters']}"
function_descriptions.append(description)

# Combine the function descriptions with the original prompt.
function_info = ' '.join(function_descriptions)
function_info = '\n'.join(function_descriptions)
upgraded_prompt = f"{function_info}\n{prompt}"

return upgraded_prompt
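The instruction block assembled above asks the model to emit each call inside a fenced code block tagged `function` containing a small JSON object. The snippet below is a hedged illustration of that contract; `parse_function_calls` is an assumed helper (the repo itself goes through `extract_code_blocks`), and the example model output is made up.

```python
# Hedged illustration of the function-call format described above.
# parse_function_calls is an assumed helper, not the repo's implementation.
import json
import re

FENCE = "`" * 3  # literal triple-backtick fence, built programmatically for readability

example_model_output = (
    "Let me check the weather first.\n"
    f"{FENCE}function\n"
    '{"function_name": "get_weather", "function_parameters": ["Paris"]}\n'
    f"{FENCE}\n"
    "@<NEXT>@"
)

def parse_function_calls(text: str):
    # Find every fenced block tagged "function" and decode its JSON payload.
    pattern = FENCE + r"function\s*(\{.*?\})\s*" + FENCE
    return [json.loads(match) for match in re.findall(pattern, text, re.DOTALL)]

print(parse_function_calls(example_model_output))
# [{'function_name': 'get_weather', 'function_parameters': ['Paris']}]
```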
@ -3405,6 +3430,7 @@ The AI should respond in this format using data from actions_list:
Returns:
List[Dict[str, Any]]: A list of dictionaries representing the function calls.
"""

# Extract markdown code blocks that contain JSON.
code_blocks = self.extract_code_blocks(text)

@ -3427,7 +3453,18 @@ The AI should respond in this format using data from actions_list:
return function_calls

def interact_with_function_call(self, prompt, function_definitions, prompt_after_execution=True, callback = None, hide_function_call=False):
def interact_with_function_call(
self,
prompt,
function_definitions,
prompt_after_execution=True,
callback = None,
hide_function_call=False,
separate_output=False):
start_header_id_template = self.config.start_header_id_template
end_header_id_template = self.config.end_header_id_template
system_message_template = self.config.system_message_template
separator_template = self.config.separator_template
final_output = ""
if len(self.personality.image_files)>0:
out, function_calls = self.generate_with_function_calls_and_images(prompt, self.personality.image_files, function_definitions, callback=callback)
@ -3438,9 +3475,12 @@ The AI should respond in this format using data from actions_list:
self.full("") #Hide function call
outputs = self.execute_function_calls(function_calls,function_definitions)
final_output = "\n".join([str(o) if type(o)==str else str(o[0]) if (type(o)==tuple or type(0)==list) and len(o)>0 else "" for o in outputs])
out += "\n!@>function calls results:\n" + final_output
out += f"{separator_template}{start_header_id_template}function calls results{end_header_id_template}\n" + final_output
if prompt_after_execution:
prompt += out +"\n"+ "!@>"+self.personality.name+":"
if separate_output:
self.full(final_output)
self.new_message("")
prompt += out +"\n"+ f"{start_header_id_template}"+self.personality.name+f"{end_header_id_template}"
if len(self.personality.image_files)>0:
out, function_calls = self.generate_with_function_calls_and_images(prompt, self.personality.image_files, function_definitions, callback=callback)
else:
@ -3449,8 +3489,8 @@ The AI should respond in this format using data from actions_list:
if len(function_calls)>0:
outputs = self.execute_function_calls(function_calls,function_definitions)
final_output = "\n".join([str(o) if type(o)==str else str(o[0]) if (type(o)==tuple or type(0)==list) and len(o)>0 else "" for o in outputs])
out += "\n!@>function calls results:\n" + final_output
prompt += out +"\n"+ "!@>"+self.personality.name+":"
out += f"{separator_template}{start_header_id_template}function calls results{end_header_id_template}\n" + final_output
prompt += out +"\n"+ f"{start_header_id_template}"+self.personality.name+f"{end_header_id_template}"
else:
final_output = out
return final_output

@ -1,5 +1,5 @@
# =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
version: 104
version: 106
binding_name: null
model_name: null
model_variant: null
@ -29,6 +29,11 @@ app_custom_logo: ""

# Genreration parameters
discussion_prompt_separator: "!@>"
start_header_id_template: "!@>"
end_header_id_template: ": "
separator_template: "\n"
system_message_template: "system"

seed: -1
ctx_size: 4084
max_n_predict: 4096
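Taken together, the new keys define how every message header is rendered throughout the codebase. A minimal sketch, assuming the default values shown above:

```python
# Minimal sketch of header rendering, assuming the default template values above.
config = {
    "start_header_id_template": "!@>",
    "end_header_id_template": ": ",
    "separator_template": "\n",
    "system_message_template": "system",
}

def render_header(role: str) -> str:
    # "!@>" + "system" + ": "  ->  "!@>system: "
    return f"{config['start_header_id_template']}{role}{config['end_header_id_template']}"

prompt = (
    render_header(config["system_message_template"])
    + "You are a helpful assistant."
    + config["separator_template"]
    + render_header("user")
    + "Hello!"
)
print(prompt)
# !@>system: You are a helpful assistant.
# !@>user: Hello!
```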
@ -457,12 +457,12 @@ async def v1_chat_completions(request: ChatGenerationRequest):
roles= False
for message in messages:
if message.role!="":
prompt += f"!@>{message.role}: {message.content}\n"
prompt += f"{elf_server.config.discussion_prompt_separator}{message.role}: {message.content}\n"
roles = True
else:
prompt += f"{message.content}\n"
if roles:
prompt += "!@>assistant:"
prompt += f"{elf_server.config.discussion_prompt_separator}assistant:"
n_predict = max_tokens if max_tokens>0 else 1024
stream = request.stream
prompt_tokens = len(elf_server.binding.tokenize(prompt))
@ -599,12 +599,12 @@ async def ollama_chat_completion(request: ChatGenerationRequest):
roles= False
for message in messages:
if message.role!="":
prompt += f"!@>{message.role}: {message.content}\n"
prompt += f"{elf_server.config.discussion_prompt_separator}{message.role}: {message.content}\n"
roles = True
else:
prompt += f"{message.content}\n"
if roles:
prompt += "!@>assistant:"
prompt += f"{elf_server.config.discussion_prompt_separator}assistant:"
n_predict = max_tokens if max_tokens>0 else 1024
stream = request.stream
prompt_tokens = len(elf_server.binding.tokenize(prompt))
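Both endpoints now flatten the OpenAI-style message list with the configurable `discussion_prompt_separator` instead of a hard-coded `!@>`. A self-contained approximation of that loop (the `Message` dataclass here is only a stand-in for the request schema):

```python
# Stand-alone approximation of the message flattening used by both endpoints.
from dataclasses import dataclass

@dataclass
class Message:          # stand-in for the endpoint's request schema
    role: str
    content: str

def flatten_messages(messages, discussion_prompt_separator="!@>"):
    prompt, roles = "", False
    for message in messages:
        if message.role != "":
            prompt += f"{discussion_prompt_separator}{message.role}: {message.content}\n"
            roles = True
        else:
            prompt += f"{message.content}\n"
    if roles:
        # Close with an assistant header so the model answers in the right slot.
        prompt += f"{discussion_prompt_separator}assistant:"
    return prompt

print(flatten_messages([Message("system", "Be concise."), Message("user", "Hi")]))
```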
@ -76,6 +76,9 @@ def add_events(sio:socketio):
prompt = model.detokenize(tokenized[-n_crop:])

n_predicts = data["n_predicts"]
if n_predicts is None:
n_predicts = lollmsElfServer.config.max_n_predict

parameters = data.get("parameters",{
"temperature":lollmsElfServer.config["temperature"],
"top_k":lollmsElfServer.config["top_k"],
@ -176,7 +179,7 @@ def add_events(sio:socketio):

tk = personality.model.tokenize(full_discussion)
n_tokens = len(tk)
fd = personality.model.detokenize(tk[-min(lollmsElfServer.config.ctx_size-n_cond_tk-personality.model_n_predicts,n_tokens):])
fd = personality.model.detokenize(tk[-min(lollmsElfServer.config.ctx_size-n_cond_tk-n_predicts, n_tokens):])

if personality.processor is not None and personality.processor_cfg["custom_workflow"]:
ASCIIColors.info("processing...")
@ -188,7 +191,7 @@ def add_events(sio:socketio):
ASCIIColors.info("generating...")
generated_text = personality.model.generate(
personality.personality_conditioning+fd,
n_predict=personality.model_n_predicts,
n_predict=n_predicts,
callback=callback)

if personality.processor is not None and personality.processor_cfg["process_model_output"]:
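The second change reserves context for the number of tokens actually requested (`n_predicts`) rather than the personality's default. A worked example of the cropping arithmetic, with purely illustrative numbers:

```python
# Worked example of the context-cropping arithmetic above (illustrative values only).
ctx_size = 4084      # total context window
n_cond_tk = 84       # tokens consumed by the personality conditioning
n_predicts = 1000    # tokens reserved for the answer
n_tokens = 5000      # tokens in the full discussion

keep = min(ctx_size - n_cond_tk - n_predicts, n_tokens)
print(keep)  # 3000 -> only the last 3000 discussion tokens are kept: tk[-keep:]
```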
@ -51,6 +51,7 @@ class LollmsDalle(LollmsTTI):
def paint(
self,
prompt,
negative_prompt,
width=512,
height=512,
images = [],

@ -156,8 +156,8 @@ class LollmsDiffusers(LollmsTTI):

def paint(
self,
diffusers_positive_prompt,
diffusers_negative_prompt,
positive_prompt,
negative_prompt,
files=[],
sampler_name="Euler",
seed=-1,
@ -172,10 +172,10 @@ class LollmsDiffusers(LollmsTTI):
if output_path is None:
output_path = self.output_dir
from diffusers.utils.pil_utils import pt_to_pil
image = self.model(diffusers_positive_prompt, negative_prompt=diffusers_negative_prompt, guidance_scale=scale, num_inference_steps=steps,).images[0]
image = self.model(positive_prompt, negative_prompt=negative_prompt, guidance_scale=scale, num_inference_steps=steps,).images[0]
output_path = Path(output_path)
fn = find_next_available_filename(output_path,"diff_img_")
# Save the image
image.save(fn)
return fn, {"prompt":diffusers_positive_prompt, "negative_prompt":diffusers_negative_prompt}
return fn, {"prompt":positive_prompt, "negative_prompt":negative_prompt}

@ -50,7 +50,8 @@ class LollmsMidjourney(LollmsTTI):

def paint(
self,
prompt,
positive_prompt,
negative_prompt,
width=512,
height=512,
images = [],
@ -111,7 +112,7 @@ class LollmsMidjourney(LollmsTTI):

else:
response = openai.images.generate(
model=generation_engine,
prompt=prompt.strip(),
positive_prompt=positive_prompt.strip(),
quality="standard",
size=f"{width}x{height}",
n=1,

@ -356,8 +356,8 @@ class LollmsSD(LollmsTTI):

def paint(
self,
sd_positive_prompt,
sd_negative_prompt,
positive_prompt,
negative_prompt,
files=[],
sampler_name="Euler",
seed=-1,
@ -376,8 +376,8 @@ class LollmsSD(LollmsTTI):
if len(files)>0:
try:
generated = self.img2img(
sd_positive_prompt,
sd_negative_prompt,
positive_prompt,
negative_prompt,
[self.loadImage(files[-1])],
sampler_name=sampler_name,
seed=seed,
@ -405,8 +405,8 @@ class LollmsSD(LollmsTTI):
else:
try:
generated = self.txt2img(
sd_positive_prompt,
negative_prompt=sd_negative_prompt,
positive_prompt,
negative_prompt=negative_prompt,
sampler_name=sampler_name,
seed=seed,
cfg_scale=scale,

@ -190,15 +190,17 @@ class LollmsXTTS(LollmsTTS):
def update_settings(self):
try:
settings = {
"stream_chunk_size": self.app.config.xtts_stream_chunk_size,
"temperature": self.app.config.xtts_temperature,
"speed": self.app.config.xtts_speed,
"length_penalty": self.app.config.xtts_length_penalty,
"repetition_penalty": self.app.config.xtts_repetition_penalty,
"top_p": self.app.config.xtts_top_p,
"top_k": self.app.config.xtts_top_k,
"enable_text_splitting": self.app.config.xtts_enable_text_splitting
}
"stream_chunk_size": int(self.app.config.xtts_stream_chunk_size),
"temperature": float(self.app.config.xtts_temperature),
"speed": float(self.app.config.xtts_speed),
"length_penalty": float(self.app.config.xtts_length_penalty),
"repetition_penalty": float(self.app.config.xtts_repetition_penalty),
"top_p": float(self.app.config.xtts_top_p),
"top_k": int(self.app.config.xtts_top_k),
"enable_text_splitting": bool(self.app.config.xtts_enable_text_splitting)
}
print("set_tts_settings")
print(f"{settings}")
response = requests.post(f"{self.xtts_base_url}/set_tts_settings", settings,headers={
'accept': 'application/json',
'Content-Type': 'application/json'
@ -12,8 +12,11 @@ import json
class TasksLibrary:
def __init__(self, lollms:LoLLMsCom, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None) -> None:
self.lollms = lollms
self.config = lollms.config
self.callback = callback
self.anti_prompts = [self.lollms.config.discussion_prompt_separator]+["!@>"]
self.anti_prompts = [lollms.config.discussion_prompt_separator]
if lollms.config.separator_template!="\n":
self.anti_prompts.append(lollms.config.separator_template)

def print_prompt(self, title, prompt):
ASCIIColors.red("*-*-*-*-*-*-*-* ", end="")
@ -182,8 +185,12 @@ class TasksLibrary:
Returns:
- str: The generated text after removing special tokens ("<s>" and "</s>") and stripping any leading/trailing whitespace.
"""
start_header_id_template = self.config.start_header_id_template
end_header_id_template = self.config.end_header_id_template
system_message_template = self.config.system_message_template
separator_template = self.config.separator_template
prompt = "\n".join([
"!@>system: I am an AI assistant that can converse and analyze images. When asked to locate something in an image you send, I will reply with:",
f"{start_header_id_template}{system_message_template}{end_header_id_template}I am an AI assistant that can converse and analyze images. When asked to locate something in an image you send, I will reply with:",
"boundingbox(image_index, label, left, top, width, height)",
"Where:",
"image_index: 0-based index of the image",
@ -510,25 +517,33 @@ class TasksLibrary:
return code_blocks

def translate_conditionning(self, prompt, original_language, language):
conditionning_translation_text = f"!@>instruction: Translate the following prompt to {language}.\nDo not translate any css or code, just the text and strings.\n!@>prompt:\n```{original_language}\n{prompt.replace('!@>','')}\n```\n!@>translation:\nHere is the translated prompt:\n```{language}\n"
start_header_id_template = self.config.start_header_id_template
end_header_id_template = self.config.end_header_id_template
system_message_template = self.config.system_message_template
separator_template = self.config.separator_template
conditionning_translation_text = f"{start_header_id_template}{system_message_template}{end_header_id_template}Translate the following prompt to {language}.\nDo not translate any css or code, just the text and strings.{separator_template}{start_header_id_template}prompt{end_header_id_template}\n```{original_language}\n{prompt.replace(f'{start_header_id_template}','')}\n```{separator_template}{start_header_id_template}translation{end_header_id_template}\nHere is the translated prompt:\n```{language}\n"
cond_translation = f"```{language}\n"+self.fast_gen(conditionning_translation_text, temperature=0.1, callback=self.sink)
response = self.extract_code_blocks(cond_translation)
if len(response)>0 and len(response[0]["content"])>0:
conditionning = "!@>system: "+response[0]["content"]
conditionning = response[0]["content"]
else:
ASCIIColors.print(f"Failed to translate the conditionning message. Reverting to english conditionning with a request to use the lanuage {language}")
conditionning = prompt + f"\nAlways answer in {language}\n"
return conditionning

def translate_message(self, prompt, original_language, language):
message_translation_text = f"!@>instruction: Translate the following message to {language}.\nDo not translate any css or code, just the text and strings.\n!@>prompt:\n```{original_language}\n{prompt.replace('!@>','')}\n```\n!@>translation:\n```{language}\n"
start_header_id_template = self.config.start_header_id_template
end_header_id_template = self.config.end_header_id_template
system_message_template = self.config.system_message_template
separator_template = self.config.separator_template
message_translation_text = f"{start_header_id_template}{system_message_template}{end_header_id_template}Translate the following message to {language}.\nDo not translate any css or code, just the text and strings.{separator_template}{start_header_id_template}prompt:\n```{original_language}\n{prompt.replace(f'{start_header_id_template}','')}\n```{separator_template}{start_header_id_template}translation{end_header_id_template}\n```{language}\n"
cond_translation = f"```{language}\n"+self.fast_gen(message_translation_text, temperature=0.1, callback=self.sink)
response = self.extract_code_blocks(cond_translation)
if len(response)>0 and len(response[0]["content"])>0:
translated = response[0]["content"]
else:
ASCIIColors.print(f"Failed to translate the message. Reverting to english conditionning with a request to use the lanuage {language}")
message_translation_text = f"!@>instruction: Translate the following message to {language}.\nDo not translate any css or code, just the text and strings.\n!@>message:\n{prompt.replace('!@>','')}\n!@>translation:\n"
message_translation_text = f"{start_header_id_template}{system_message_template}{end_header_id_template}Translate the following message to {language}.\nDo not translate any css or code, just the text and strings.{separator_template}{start_header_id_template}message{end_header_id_template}\n{prompt.replace(f'{start_header_id_template}','')}{separator_template}{start_header_id_template}translation{end_header_id_template}\n"
translated = self.fast_gen(message_translation_text, temperature=0.1, callback=self.sink)
return translated

@ -630,6 +645,10 @@ class TasksLibrary:
chunk_summary_post_processing=None,
summary_mode=SUMMARY_MODE.SUMMARY_MODE_SEQUENCIAL
):
start_header_id_template = self.config.start_header_id_template
end_header_id_template = self.config.end_header_id_template
system_message_template = self.config.system_message_template
separator_template = self.config.separator_template
if summary_mode==SUMMARY_MODE.SUMMARY_MODE_SEQUENCIAL:
summary = ""
for i, chunk in enumerate(chunks):
@ -637,16 +656,16 @@ class TasksLibrary:
if summary !="":
summary = f"{answer_start}"+ self.fast_gen(
"\n".join([
f"!@>Document_chunk: {doc_name}:",
f"{start_header_id_template}Document_chunk: {doc_name}{end_header_id_template}",
f"This is a cumulative summary step. Use the summary of the previous chunks and the current chunk of the document to make a new summary integrating information from both. Make sure not to loose information from previous summaries",
f"Summary of previous chunks",
f"{summary}",
f"current chunk:",
f"{chunk}",
f"!@>instruction: {summary_instruction}",
f"{start_header_id_template}{system_message_template}{end_header_id_template}{summary_instruction}",
f"The summary should extract required information from the current chunk to increment the previous summary.",
f"Answer directly with the cumulative summary with no extra comments.",
f"!@>summary:",
f"{start_header_id_template}summary{end_header_id_template}",
f"{answer_start}"
]),
max_generation_size=max_generation_size,
@ -654,12 +673,12 @@ class TasksLibrary:
else:
summary = f"{answer_start}"+ self.fast_gen(
"\n".join([
f"!@>Document_chunk: {doc_name}:",
f"{start_header_id_template}Document_chunk: {doc_name}{end_header_id_template}",
f"current chunk:",
f"{chunk}",
f"!@>instruction: {summary_instruction}",
f"{start_header_id_template}{system_message_template}{end_header_id_template}{summary_instruction}",
f"Answer directly with the summary with no extra comments.",
f"!@>summary:",
f"{start_header_id_template}summary{end_header_id_template}",
f"{answer_start}"
]),
max_generation_size=max_generation_size,
@ -674,11 +693,11 @@ class TasksLibrary:
self.step_start(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
summary = f"{answer_start}"+ self.fast_gen(
"\n".join([
f"!@>Document_chunk [{doc_name}]:",
f"{start_header_id_template}Document_chunk [{doc_name}]{end_header_id_template}",
f"{chunk}",
f"!@>instruction: {summary_instruction}",
f"{start_header_id_template}{system_message_template}{end_header_id_template}{summary_instruction}",
f"Answer directly with the summary with no extra comments.",
f"!@>summary:",
f"{start_header_id_template}summary{end_header_id_template}",
f"{answer_start}"
]),
max_generation_size=max_generation_size,
@ -699,6 +718,10 @@ class TasksLibrary:
callback=None,
chunk_summary_post_processing=None
):
start_header_id_template = self.config.start_header_id_template
end_header_id_template = self.config.end_header_id_template
system_message_template = self.config.system_message_template
separator_template = self.config.separator_template
summeries = []
for i, chunk in enumerate(chunks):
if i<len(chunks)-1:
@ -710,14 +733,14 @@ class TasksLibrary:
self.step_start(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
summary = f"{answer_start}"+ self.fast_gen(
"\n".join([
f"!@>Document_chunk: {doc_name}:",
f"{start_header_id_template}Document_chunk: {doc_name}{end_header_id_template}",
f"Block1:",
f"{chunk}",
f"Block2:",
f"{chunk1}",
f"!@>instruction: {summary_instruction}",
f"{start_header_id_template}{system_message_template}{end_header_id_template}{summary_instruction}",
f"Answer directly with the summary with no extra comments.",
f"!@>summary:",
f"{start_header_id_template}summary{end_header_id_template}:",
f"{answer_start}"
]),
max_generation_size=max_generation_size,
@ -740,7 +763,11 @@ class TasksLibrary:
|
||||
Returns:
|
||||
str: The upgraded prompt that includes information about the function calls.
|
||||
"""
|
||||
function_descriptions = ["!@>information: If you need to call a function to fulfull the user request, use a function markdown tag with the function call as the following json format:",
|
||||
start_header_id_template = self.config.start_header_id_template
|
||||
end_header_id_template = self.config.end_header_id_template
|
||||
system_message_template = self.config.system_message_template
|
||||
separator_template = self.config.separator_template
|
||||
function_descriptions = [f"{start_header_id_template}information{end_header_id_template}If you need to call a function to fulfull the user request, use a function markdown tag with the function call as the following json format:",
|
||||
"```function",
|
||||
"{",
|
||||
'"function_name":the name of the function to be called,',
|
||||
@ -750,8 +777,7 @@ class TasksLibrary:
|
||||
"You can call multiple functions in one generation.",
|
||||
"Each function call needs to be in a separate function markdown tag.",
|
||||
"Do not add status of the execution as it will be added automatically by the system.",
|
||||
"If you want to get the output of the function before answering the user, then use the keyword @<NEXT>@ at the end of your message.",
|
||||
"!@>List of possible functions to be called:\n"]
|
||||
f"{start_header_id_template}List of possible functions to be called{end_header_id_template}\n"]
|
||||
for function in functions:
|
||||
description = f"{function['function_name']}: {function['function_description']}\nparameters:{function['function_parameters']}"
|
||||
function_descriptions.append(description)
|
||||
|
@ -48,7 +48,14 @@ class LollmsTTI:
|
||||
self.voices = [] # To be filled by the child class
|
||||
self.models = [] # To be filled by the child class
|
||||
|
||||
def paint(self, positive_prompt: str, negative_prompt: str = "") -> List[Dict[str, str]]:
|
||||
def paint(self,
|
||||
positive_prompt: str,
|
||||
negative_prompt: str = "",
|
||||
width=512,
|
||||
height=512,
|
||||
images = [],
|
||||
generation_engine=None,
|
||||
output_path = None) -> List[Dict[str, str]]:
|
||||
"""
|
||||
Generates images based on the given positive and negative prompts.
|
||||