Moved all personalities to new prompting format

Author: Saifeddine ALOUI
Date: 2025-02-17 02:02:07 +01:00
Parent: 42ea55c4ed
Commit: 862fcc594f
7 changed files with 515 additions and 251 deletions

lollms/app.py

@@ -12,6 +12,8 @@ from lollms.utilities import PromptReshaper
from lollms.client_session import Client, Session
from lollms.databases.skills_database import SkillsLibrary
from lollms.tasks import TasksLibrary
from lollms.prompting import LollmsLLMTemplate, LollmsContextDetails
import importlib
from lollmsvectordb.database_elements.chunk import Chunk
from lollmsvectordb.vector_database import VectorDatabase
@@ -99,7 +101,8 @@ class LollmsApplication(LoLLMsCom):
self.rt_com = None
self.is_internet_available = self.check_internet_connection()
self.template = LollmsLLMTemplate(self.config, self.personality)
if not free_mode:
try:
if config.auto_update and self.is_internet_available:
@@ -1028,7 +1031,7 @@ class LollmsApplication(LoLLMsCom):
discussion += "\n" + self.config.discussion_prompt_separator + msg.sender + ": " + msg.content.strip()
return discussion
# -------------------------------------- Prompt preparing
def prepare_query(self, client_id: str, message_id: int = -1, is_continue: bool = False, n_tokens: int = 0, generation_type = None, force_using_internet=False, previous_chunk="") -> Tuple[str, str, List[str]]:
def prepare_query(self, client_id: str, message_id: int = -1, is_continue: bool = False, n_tokens: int = 0, generation_type = None, force_using_internet=False, previous_chunk="") -> LollmsContextDetails:
"""
Prepares the query for the model.
@@ -1045,7 +1048,6 @@ class LollmsApplication(LoLLMsCom):
skills = []
documentation_entries = []
if self.personality.callback is None:
self.personality.callback = partial(self.process_data, client_id=client_id)
# Get the list of messages
@@ -1087,7 +1089,7 @@ class LollmsApplication(LoLLMsCom):
if len(conditionning)>0:
if type(conditionning) is list:
conditionning = "\n".join(conditionning)
conditionning = self.system_full_header + self.personality.replace_keys(conditionning, self.personality.conditionning_commands) + ("" if conditionning[-1]==self.separator_template else self.separator_template)
conditionning = self.system_full_header + conditionning + ("" if conditionning[-1]==self.separator_template else self.separator_template)
# Check if there are document files to add to the prompt
internet_search_results = ""
@@ -1353,6 +1355,34 @@ Answer directly with the reformulation of the last prompt.
n_user_description_tk = 0
function_calls = []
if len(self.config.mounted_function_calls)>0:
for fc in self.config.mounted_function_calls:
if fc["selected"]:
dr = Path(fc["dir"])
with open(dr/"config.yaml", "r") as f:
fc_dict = yaml.safe_load(f.read())
# Step 1: Construct the full path to the function.py module
module_path = dr / "function.py"
module_name = "function" # Name for the loaded module
# Step 2: Use importlib.util to load the module from the file path
spec = importlib.util.spec_from_file_location(module_name, module_path)
if spec is None:
raise ImportError(f"Could not load module from {module_path}")
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module # Add the module to sys.modules
spec.loader.exec_module(module) # Execute the module
# Step 3: Retrieve the class from the module using the class name
class_name = fc_dict["class_name"]
class_ = getattr(module, class_name)
# Step 4: Create an instance of the class and store it in fc_dict["class"]
fc_dict["class"] = class_(self, client)
function_calls.append(fc_dict)
# Calculate the total number of tokens between conditionning, documentation, and knowledge
total_tokens = n_cond_tk + n_isearch_tk + n_doc_tk + n_user_description_tk + n_positive_boost + n_negative_boost + n_fun_mode + n_think_first_mode
@@ -1484,28 +1514,37 @@ Answer directly with the reformulation of the last prompt.
# Details
context_details = {
"client_id":client_id,
"conditionning":conditionning,
"internet_search_infos":internet_search_infos,
"internet_search_results":internet_search_results,
"documentation":documentation,
"documentation_entries":documentation_entries,
"user_description":user_description,
"discussion_messages":discussion_messages,
"positive_boost":positive_boost,
"negative_boost":negative_boost,
"current_language":self.config.current_language,
"fun_mode":fun_mode,
"think_first_mode":think_first_mode,
"ai_prefix":ai_prefix,
"extra":"",
"available_space":available_space,
"skills":skills_detials,
"is_continue":is_continue,
"previous_chunk":previous_chunk,
"prompt":current_message.content
}
context_details = LollmsContextDetails(
client=client,
conditionning=conditionning,
internet_search_infos=internet_search_infos,
internet_search_results=internet_search_results,
documentation=documentation,
documentation_entries=documentation_entries,
user_description=user_description,
discussion_messages=discussion_messages,
positive_boost=positive_boost,
negative_boost=negative_boost,
current_language=self.config.current_language,
fun_mode=fun_mode,
think_first_mode=think_first_mode,
ai_prefix=ai_prefix,
extra="",
available_space=available_space,
skills=skills_detials,
is_continue=is_continue,
previous_chunk=previous_chunk,
prompt=current_message.content,
function_calls=function_calls,
debug=self.config.debug,
ctx_size=self.config.ctx_size,
max_n_predict=self.config.max_n_predict,
model=self.model
)
if self.config.debug and not self.personality.processor:
ASCIIColors.highlight(documentation,"source_document_title", ASCIIColors.color_yellow, ASCIIColors.color_red, False)
# Return the prepared context details object
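Note for callers: prepare_query now returns a LollmsContextDetails object instead of the old Tuple[str, str, List[str]]. A minimal migration sketch, assuming a running LollmsApplication `app` and a registered client (variable names are illustrative):

```python
# Before this commit, prepare_query returned a tuple (see the old annotation above).
# Now it returns a single object that carries the whole generation context.
context_details = app.prepare_query(client_id, message_id=-1, is_continue=False)

print(context_details.prompt)           # current user message content
print(context_details.available_space)  # remaining token budget

# The final prompt is assembled through the shared template object added in this commit:
full_prompt = context_details.build_prompt(app.template)
```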

lollms/com.py

@@ -4,7 +4,9 @@ from typing import Callable, Any
import socketio
from enum import Enum
from lollms.types import MSG_OPERATION_TYPE
from lollms.templating import LollmsLLMTemplate
from typing import Any, List
class NotificationType(Enum):
"""Notification types."""
@@ -44,6 +46,7 @@ class LoLLMsCom:
self.sio= sio
self.verbose = verbose
self.config = None
self.template:LollmsLLMTemplate = None
self.tti = None
self.tts = None
self.stt = None

lollms/function_call.py

@@ -1,70 +1,106 @@
from functools import partial
from lollms.tasks import TasksLibrary
from typing import Dict, Any, List
class FunctionCalling_Library:
def __init__(self, tasks_library:TasksLibrary):
self.tl = tasks_library
self.function_definitions = []
from enum import Enum, auto
from lollms.client_session import Client
def register_function(self, function_name, function_callable, function_description, function_parameters):
self.function_definitions.append({
"function_name": function_name,
"function": function_callable,
"function_description": function_description,
"function_parameters": function_parameters
})
# Step 1: Define the FunctionType enum
class FunctionType(Enum):
CONTEXT_UPDATE = auto() # Adds information to the context
AI_FIRST_CALL = auto() # Called by the AI first, returns output, AI continues
CLASSIC = auto() # A classic function call with prompt
POST_GENERATION = auto() # Called after generation to post-process the output (referenced by update_context below)
def unregister_function(self, function_name):
self.function_definitions = [func for func in self.function_definitions if func["function_name"] != function_name]
# Step 2: Update the FunctionCall base class
class FunctionCall:
def __init__(self, function_type: FunctionType, client: Client):
self.function_type = function_type
self.client = client
def execute_function_calls(self, function_calls: List[Dict[str, Any]]) -> List[Any]:
def execute(self, *args, **kwargs):
"""
Executes the function calls with the parameters extracted from the generated text,
using the original functions list to find the right function to execute.
Args:
function_calls (List[Dict[str, Any]]): A list of dictionaries representing the function calls.
function_definitions (List[Dict[str, Any]]): The original list of functions with their descriptions and callable objects.
Returns:
List[Any]: A list of results from executing the function calls.
Execute the function based on its type.
This method should be overridden by subclasses.
"""
results = []
# Convert function_definitions to a dict for easier lookup
functions_dict = {func['function_name']: func['function'] for func in self.function_definitions}
raise NotImplementedError("Subclasses must implement the execute method.")
for call in function_calls:
function_name = call.get("function_name")
parameters = call.get("function_parameters", [])
function = functions_dict.get(function_name)
def update_context(self, context, constructed_context: list):
"""
Update the context if needed.
This method should be overridden by subclasses.
"""
if self.function_type == FunctionType.CONTEXT_UPDATE:
raise NotImplementedError("Subclasses must implement the update_context method for CONTEXT_UPDATE functions.")
elif self.function_type == FunctionType.AI_FIRST_CALL:
raise NotImplementedError("Subclasses must implement the update_context method for AI_FIRST_CALL functions.")
elif self.function_type == FunctionType.POST_GENERATION:
raise NotImplementedError("Subclasses must implement the update_context method for POST_GENERATION functions.")
if function:
try:
# Assuming parameters is a dictionary that maps directly to the function's arguments.
if type(parameters)==list:
result = function(*parameters)
elif type(parameters)==dict:
result = function(**parameters)
results.append(result)
except TypeError as e:
# Handle cases where the function call fails due to incorrect parameters, etc.
results.append(f"Error calling {function_name}: {e}")
else:
results.append(f"Function {function_name} not found.")
return results
# from lollms.tasks import TasksLibrary
# class FunctionCalling_Library:
# def __init__(self, tasks_library:TasksLibrary):
# self.tl = tasks_library
# self.function_definitions = []
# def register_function(self, function_name, function_callable, function_description, function_parameters):
# self.function_definitions.append({
# "function_name": function_name,
# "function": function_callable,
# "function_description": function_description,
# "function_parameters": function_parameters
# })
# def unregister_function(self, function_name):
# self.function_definitions = [func for func in self.function_definitions if func["function_name"] != function_name]
# def execute_function_calls(self, function_calls: List[Dict[str, Any]]) -> List[Any]:
# """
# Executes the function calls with the parameters extracted from the generated text,
# using the original functions list to find the right function to execute.
# Args:
# function_calls (List[Dict[str, Any]]): A list of dictionaries representing the function calls.
# function_definitions (List[Dict[str, Any]]): The original list of functions with their descriptions and callable objects.
# Returns:
# List[Any]: A list of results from executing the function calls.
# """
# results = []
# # Convert function_definitions to a dict for easier lookup
# functions_dict = {func['function_name']: func['function'] for func in self.function_definitions}
# for call in function_calls:
# function_name = call.get("function_name")
# parameters = call.get("function_parameters", [])
# function = functions_dict.get(function_name)
# if function:
# try:
# # Assuming parameters is a dictionary that maps directly to the function's arguments.
# if type(parameters)==list:
# result = function(*parameters)
# elif type(parameters)==dict:
# result = function(**parameters)
# results.append(result)
# except TypeError as e:
# # Handle cases where the function call fails due to incorrect parameters, etc.
# results.append(f"Error calling {function_name}: {e}")
# else:
# results.append(f"Function {function_name} not found.")
# return results
def generate_with_functions(self, prompt):
# Assuming generate_with_function_calls is a method from TasksLibrary
ai_response, function_calls = self.tl.generate_with_function_calls(prompt, self.function_definitions)
return ai_response, function_calls
# def generate_with_functions(self, prompt):
# # Assuming generate_with_function_calls is a method from TasksLibrary
# ai_response, function_calls = self.tl.generate_with_function_calls(prompt, self.function_definitions)
# return ai_response, function_calls
def generate_with_functions_with_images(self, prompt, image_files):
# Assuming generate_with_function_calls_and_images is a method from TasksLibrary
if len(image_files) > 0:
ai_response, function_calls = self.tl.generate_with_function_calls_and_images(prompt, image_files, self.function_definitions)
else:
ai_response, function_calls = self.tl.generate_with_function_calls(prompt, self.function_definitions)
# def generate_with_functions_with_images(self, prompt, image_files):
# # Assuming generate_with_function_calls_and_images is a method from TasksLibrary
# if len(image_files) > 0:
# ai_response, function_calls = self.tl.generate_with_function_calls_and_images(prompt, image_files, self.function_definitions)
# else:
# ai_response, function_calls = self.tl.generate_with_function_calls(prompt, self.function_definitions)
return ai_response, function_calls
# return ai_response, function_calls
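For authors of mounted functions, the loader in prepare_query instantiates each selected function as class_(app, client), and LollmsContextDetails.build_prompt later calls update_context on CONTEXT_UPDATE functions with the list of prompt sections. A minimal sketch of a conforming function.py; the class name and injected text are illustrative, only the FunctionCall/FunctionType interface comes from this commit:

```python
# function.py -- hypothetical module pointed to by a mounted function's config.yaml
from datetime import datetime

from lollms.function_call import FunctionCall, FunctionType
from lollms.client_session import Client


class CurrentDateTime(FunctionCall):
    """Illustrative CONTEXT_UPDATE function that injects the current date/time."""

    def __init__(self, app, client: Client):
        # prepare_query builds the instance as class_(app, client)
        super().__init__(FunctionType.CONTEXT_UPDATE, client)
        self.app = app

    def update_context(self, context, constructed_context):
        # constructed_context is the list of prompt sections being assembled by
        # LollmsContextDetails.build_prompt; return it with our section appended.
        constructed_context.append(f"Current date and time: {datetime.now():%Y-%m-%d %H:%M:%S}")
        return constructed_context
```

The matching config.yaml would at least carry the class_name key ("CurrentDateTime" here) that the loader resolves with getattr.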

lollms/personality.py

@@ -42,7 +42,7 @@ import sys
from lollms.com import LoLLMsCom
from lollms.helpers import trace_exception
from lollms.utilities import PackageManager
from lollms.prompting import LollmsContextDetails
import inspect
from lollms.code_parser import compress_js, compress_python, compress_html
@@ -256,7 +256,7 @@ class AIPersonality:
def compute_n_predict(self, tokens):
return min(self.config.ctx_size-len(tokens)-1,self.config.max_n_predict if self.config.max_n_predict else self.config.ctx_size-len(tokens)-1)
def build_context(self, context_details, is_continue=False, return_tokens=False):
def build_context(self, context_details:LollmsContextDetails, is_continue:bool=False, return_tokens:bool=False):
# Build the final prompt by concatenating the conditionning and discussion messages
if self.config.use_assistant_name_in_discussion:
if self.config.use_model_name_in_discussions:
@@ -269,23 +269,10 @@ class AIPersonality:
else:
ai_header = self.ai_custom_header("assistant")
elements = [
context_details["conditionning"],
context_details["internet_search_results"],
context_details["documentation"],
context_details["user_description"],
context_details["discussion_messages"],
context_details["positive_boost"],
context_details["negative_boost"],
context_details["fun_mode"],
context_details["think_first_mode"],
ai_header if not is_continue else '' if not self.config.use_continue_message \
else "CONTINUE FROM HERE And do not open a new markdown code tag." + self.separator_template + self.ai_full_header
]
# Filter out empty elements and join with separator
prompt_data = self.separator_template.join(element for element in elements if element)
prompt_data = context_details.build_prompt(self.app.template, [ai_header] if not is_continue else [] if not self.config.use_continue_message \
else ["CONTINUE FROM HERE And do not open a new markdown code tag." + self.separator_template + ai_header])
tokens = self.model.tokenize(prompt_data)
if return_tokens:
return prompt_data, tokens
@@ -2178,13 +2165,6 @@ Don't forget encapsulate the code inside a html code tag. This is mandatory.
}
# ========================================== Properties ===========================================
@property
def conditionning_commands(self):
return {
"date_time": datetime.now().strftime("%A, %B %d, %Y %I:%M:%S %p"), # Replaces {{date}} with actual date
"date": datetime.now().strftime("%A, %B %d, %Y"), # Replaces {{date}} with actual date
"time": datetime.now().strftime("%H:%M:%S"), # Replaces {{time}} with actual time
}
@property
def logo(self):
@@ -2364,7 +2344,7 @@ Don't forget encapsulate the code inside a html code tag. This is mandatory.
Returns:
str: The personality conditioning of the AI assistant.
"""
return self.replace_keys(self._personality_conditioning, self.conditionning_commands)
return self._personality_conditioning
@personality_conditioning.setter
def personality_conditioning(self, conditioning: str):
@@ -2404,7 +2384,7 @@ Don't forget encapsulate the code inside a html code tag. This is mandatory.
Returns:
str: The welcome message of the AI assistant.
"""
return self.replace_keys(self._welcome_message, self.conditionning_commands)
return self._welcome_message
@welcome_message.setter
def welcome_message(self, message: str):
@@ -2792,34 +2772,6 @@ Don't forget encapsulate the code inside a html code tag. This is mandatory.
return prompt.lower()
return None
# Helper functions
@staticmethod
def replace_keys(input_string, replacements):
"""
Replaces all occurrences of keys in the input string with their corresponding
values from the replacements dictionary.
Args:
input_string (str): The input string to replace keys in.
replacements (dict): A dictionary of key-value pairs, where the key is the
string to be replaced and the value is the replacement string.
Returns:
str: The input string with all occurrences of keys replaced by their
corresponding values.
"""
pattern = r"\{\{(\w+)\}\}"
# The pattern matches "{{key}}" and captures "key" in a group.
# The "\w+" matches one or more word characters (letters, digits, or underscore).
def replace(match):
key = match.group(1)
return replacements.get(key, match.group(0))
output_string = re.sub(pattern, replace, input_string)
return output_string
def verify_rag_entry(self, query, rag_entry):
return self.yes_no("Are there any useful information in the document chunk that can be used to answer the query?", self.app.system_custom_header("Query")+query+"\n"+self.app.system_custom_header("document chunk")+"\n"+rag_entry)
@@ -3700,7 +3652,7 @@ class APScript(StateMachine):
return self.personality.generate_structured_content(prompt, images, template, output_format, callback)
def run_workflow(self, context_details:dict=None, client:Client=None, callback: Callable[[str | list | None, MSG_OPERATION_TYPE, str, AIPersonality| None], bool]=None):
def run_workflow(self, context_details:LollmsContextDetails=None, client:Client=None, callback: Callable[[str | list | None, MSG_OPERATION_TYPE, str, AIPersonality| None], bool]=None):
"""
This function generates code based on the given parameters.
@@ -4046,106 +3998,6 @@ class APScript(StateMachine):
summeries.append(summary)
self.step_end(f" Summary of {doc_name} - Processing chunk : {i+1}/{len(chunks)}")
return "\n".join(summeries)
def build_prompt_from_context_details(self, context_details:dict, custom_entries="", suppress=[]):
"""
Builds a prompt from the provided context details.
This function concatenates various parts of the context into a single string, which is then used to build a prompt.
The context details can include conditioning, documentation, knowledge, user description, positive and negative boosts,
current language, fun mode, discussion window, and any extra information.
Parameters:
context_details (dict): A dictionary containing various context details.
custom_entries (str): Additional custom entries to be included in the prompt.
Returns:
str: The constructed prompt.
Raises:
KeyError: If any required key is missing in the context_details dictionary.
"""
full_context = []
sacrifice_id = 0
if context_details["conditionning"] and "conditionning" not in suppress:
full_context.append( self.separator_template.join([
context_details["conditionning"]
]))
sacrifice_id += 1
if context_details["documentation"] and "documentation" not in suppress:
full_context.append( self.separator_template.join([
self.system_custom_header("documentation"),
context_details["documentation"]
]))
sacrifice_id += 1
if context_details["user_description"] and "user_description" not in suppress:
full_context.append( self.separator_template.join([
self.system_custom_header("user_description"),
context_details["user_description"]
]))
sacrifice_id += 1
if context_details["positive_boost"] and "positive_boost" not in suppress:
full_context.append( self.separator_template.join([
self.system_custom_header("positive_boost"),
context_details["positive_boost"]
]))
sacrifice_id += 1
if context_details["negative_boost"] and "negative_boost" not in suppress:
full_context.append( self.separator_template.join([
self.system_custom_header("negative_boost"),
context_details["negative_boost"]
]))
sacrifice_id += 1
if context_details["current_language"] and "current_language" not in suppress:
full_context.append( self.separator_template.join([
self.system_custom_header("current_language"),
context_details["current_language"]
]))
sacrifice_id += 1
if context_details["fun_mode"] and "fun_mode" not in suppress:
full_context.append( self.separator_template.join([
self.system_custom_header("fun_mode"),
context_details["fun_mode"]
]))
sacrifice_id += 1
if context_details["discussion_messages"] and "discussion_messages" not in suppress:
full_context.append( self.separator_template.join([
self.system_custom_header("discussion_messages"),
context_details["discussion_messages"]
]))
if context_details["extra"] and "extra" not in suppress:
full_context.append( self.separator_template.join([
context_details["extra"]
]))
if custom_entries:
full_context.append( self.separator_template.join([
custom_entries
]))
if "ai_prefix" not in suppress:
full_context.append( self.separator_template.join([
self.ai_custom_header(context_details["ai_prefix"])
]))
prompt = self.build_prompt(full_context, sacrifice_id)
if self.config.debug:
nb_prompt_tokens = len(self.personality.model.tokenize(prompt))
nb_tokens = min(self.config.ctx_size - nb_prompt_tokens, self.config.max_n_predict if self.config.max_n_predict else self.config.ctx_size-nb_prompt_tokens)
ASCIIColors.info(f"Prompt size : {nb_prompt_tokens}")
ASCIIColors.info(f"Requested generation max size : {nb_tokens}")
return prompt
def build_prompt(self, prompt_parts:List[str], sacrifice_id:int=-1, context_size:int=None, minimum_spare_context_size:int=None):
"""
@@ -5294,18 +5146,18 @@ transition-all duration-300 ease-in-out">
return rounds_info
def answer(self, context_details, custom_entries = "", send_full=True, callback=None):
if context_details["is_continue"]:
full_prompt = self.build_prompt_from_context_details(context_details, custom_entries=custom_entries, suppress= ["ai_prefix"])
def answer(self, context_details:LollmsContextDetails, custom_entries = "", send_full=True, callback=None):
if context_details.is_continue:
full_prompt = context_details.build_prompt(self.personality.app.template, custom_entries=custom_entries, suppress= ["ai_prefix"])
else:
full_prompt = self.build_prompt_from_context_details(context_details, custom_entries=custom_entries)
full_prompt = context_details.build_prompt(self.personality.app.template, custom_entries=custom_entries)
out = self.fast_gen(full_prompt)
nb_tokens = len(self.personality.model.tokenize(out))
if nb_tokens >= (self.config.max_n_predict if self.config.max_n_predict else self.config.ctx_size)-1:
out = out+self.fast_gen(full_prompt+out, callback=callback)
if context_details["is_continue"]:
out = context_details["previous_chunk"] + out
if context_details.is_continue:
out = context_details.previous_chunk + out
if send_full:
self.set_message_content(out)
return out
@@ -5458,7 +5310,7 @@ transition-all duration-300 ease-in-out">
return tools
def _upgrade_prompt_with_function_info(self, context_details: dict, functions: List[Dict[str, Any]]) -> str:
def _upgrade_prompt_with_function_info(self, context_details: LollmsContextDetails, functions: List[Dict[str, Any]]) -> str:
"""
Upgrades the prompt with information about function calls.
@@ -5492,8 +5344,8 @@ transition-all duration-300 ease-in-out">
# Combine the function descriptions with the original prompt.
function_info = '\n'.join(function_descriptions)
cd["conditionning"]=function_info
upgraded_prompt = self.build_prompt_from_context_details(cd)
cd.conditionning=function_info
upgraded_prompt = cd.build_prompt(self.personality.app.template)
return upgraded_prompt
@@ -5533,10 +5385,10 @@ transition-all duration-300 ease-in-out">
def interact(
self,
context_details,
context_details:LollmsContextDetails,
callback = None
):
upgraded_prompt = self.build_prompt_from_context_details(context_details)
upgraded_prompt = context_details.build_prompt(self.personality.app.template)
if len(self.personality.image_files)>0:
# Generate the initial text based on the upgraded prompt.
generated_text = self.fast_gen_with_images(upgraded_prompt, self.personality.image_files, callback=callback)
@@ -5577,7 +5429,7 @@ transition-all duration-300 ease-in-out">
if separate_output:
self.set_message_content(final_output)
self.new_message("")
context_details["discussion_messages"] +=out
context_details.discussion_messages +=out
if len(self.personality.image_files)>0:
out, function_calls, twc = self.generate_with_function_calls_and_images(context_details, self.personality.image_files, function_definitions, callback=callback)
else:
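For personality scripts, the old dict lookups on context_details become attribute access, and prompt assembly moves into the context object itself. A hedged sketch of a migrated run_workflow, assuming APScript from lollms.personality; the generation logic is illustrative:

```python
from lollms.personality import APScript
from lollms.prompting import LollmsContextDetails
from lollms.client_session import Client


class Processor(APScript):  # hypothetical personality processor
    def run_workflow(self, context_details: LollmsContextDetails = None,
                     client: Client = None, callback=None):
        # Old style: self.build_prompt_from_context_details(context_details)
        # New style: the context object builds its own prompt through the template.
        full_prompt = context_details.build_prompt(self.personality.app.template)
        out = self.fast_gen(full_prompt, callback=callback)
        if context_details.is_continue:
            out = context_details.previous_chunk + out
        self.set_message_content(out)
        return out
```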

lollms/prompting.py (new file, 209 lines)

@@ -0,0 +1,209 @@
from enum import Enum, auto
from typing import Dict, List, Optional
from ascii_colors import ASCIIColors
from lollms.templating import LollmsLLMTemplate
from lollms.function_call import FunctionCall, FunctionType
from lollms.client_session import Client
class LollmsContextDetails:
"""
A class to manage context details for interactions with a Large Language Model (LLM).
Attributes:
client (Client): The client session object for the user interacting with the LLM.
conditionning (Optional[str]): Optional field to store conditioning information or context for the LLM.
internet_search_infos (Optional[Dict]): Dictionary to store metadata or details about internet searches performed by the LLM.
internet_search_results (Optional[List]): List to store the results of internet searches performed by the LLM.
documentation (Optional[str]): Optional field to store documentation or reference material for the LLM.
documentation_entries (Optional[List]): List to store individual entries or sections of documentation.
user_description (Optional[str]): Optional field to store a description or profile of the user.
discussion_messages (Optional[List]): List to store the history of messages in the current discussion or conversation.
positive_boost (Optional[float]): Optional field to store a positive boost value for influencing LLM behavior.
negative_boost (Optional[float]): Optional field to store a negative boost value for influencing LLM behavior.
current_language (Optional[str]): Optional field to store the current language being used in the interaction.
fun_mode (Optional[bool]): Optional boolean field to enable or disable "fun mode" for the LLM.
think_first_mode (Optional[bool]): Optional boolean field to enable or disable "think first mode" for the LLM.
ai_prefix (Optional[str]): Optional field to store a prefix or identifier for the AI in the conversation.
extra (Optional[str]): Optional field to store additional custom or extra information.
available_space (Optional[int]): Optional field to store the available space or capacity for the context.
skills (Optional[Dict]): Dictionary to store skills or capabilities of the LLM.
is_continue (Optional[bool]): Optional boolean field to indicate if the LLM is continuing from a previous chunk or context.
previous_chunk (Optional[str]): Optional field to store the previous chunk of text or context.
prompt (Optional[str]): Optional field to store the current prompt or input for the LLM.
function_calls (Optional[List]): List to store function calls or actions performed by the LLM.
debug (bool): Enable or disable debug mode.
ctx_size (int): The maximum context size for the LLM.
max_n_predict (Optional[int]): The maximum number of tokens to generate.
model : The model (required to perform tokenization)
"""
def __init__(
self,
client:Client,
conditionning: Optional[str] = None,
internet_search_infos: Optional[Dict] = None,
internet_search_results: Optional[List] = None,
documentation: Optional[str] = None,
documentation_entries: Optional[List] = None,
user_description: Optional[str] = None,
discussion_messages: Optional[List] = None,
positive_boost: Optional[float] = None,
negative_boost: Optional[float] = None,
current_language: Optional[str] = None,
fun_mode: Optional[bool] = False,
think_first_mode: Optional[bool] = False,
ai_prefix: Optional[str] = None,
extra: Optional[str] = "",
available_space: Optional[int] = None,
skills: Optional[Dict] = None,
is_continue: Optional[bool] = False,
previous_chunk: Optional[str] = None,
prompt: Optional[str] = None,
function_calls: Optional[List] = None,
debug: bool = False,
ctx_size: int = 2048,
max_n_predict: Optional[int] = None,
model = None
):
"""Initialize the LollmsContextDetails instance with the provided attributes."""
self.client = client
self.conditionning = conditionning
self.internet_search_infos = internet_search_infos if internet_search_infos is not None else {}
self.internet_search_results = internet_search_results if internet_search_results is not None else []
self.documentation = documentation
self.documentation_entries = documentation_entries if documentation_entries is not None else []
self.user_description = user_description
self.discussion_messages = discussion_messages if discussion_messages is not None else []
self.positive_boost = positive_boost
self.negative_boost = negative_boost
self.current_language = current_language
self.fun_mode = fun_mode
self.think_first_mode = think_first_mode
self.ai_prefix = ai_prefix
self.extra = extra
self.available_space = available_space
self.skills = skills if skills is not None else {}
self.is_continue = is_continue
self.previous_chunk = previous_chunk
self.prompt = prompt
self.function_calls:List[dict] = function_calls if function_calls is not None else []
self.debug = debug
self.ctx_size = ctx_size
self.max_n_predict = max_n_predict
self.model = model
def transform_function_to_text(self, template, func):
function_texts = []
# Function header
function_text = template.system_custom_header("Function") + f'\nfunction_name: {func["name"]}\nfunction_description: {func["description"]}\n'
# Parameters section
function_text += "function_parameters:\n"
for param in func["parameters"]:
param_type = "string" if param["type"] == "str" else param["type"]
param_description = param.get("description", "")
function_text += f' - {param["name"]} ({param_type}): {param_description}\n'
function_texts.append(function_text.strip())
return "\n\n".join(function_texts)
def build_prompt(self, template: LollmsLLMTemplate, custom_entries: str = "", suppress: List[str] = []) -> str:
"""
Builds a prompt from the context details using the integrated template system.
Args:
template (LollmsLLMTemplate): The template system to use for constructing the prompt.
custom_entries (str): Additional custom entries to be included in the prompt.
suppress (List[str]): A list of fields to exclude from the prompt.
Returns:
str: The constructed prompt.
"""
full_context = []
sacrifice_id = 0
def append_context(field_name: str, header: Optional[str] = None):
"""
Helper function to append context if the field is not suppressed.
Args:
field_name (str): The name of the field to append.
header (Optional[str]): An optional header to prepend to the field content.
"""
if getattr(self, field_name) and field_name not in suppress:
content:str = getattr(self, field_name)
if header:
full_context.append(header+ content.strip())
else:
full_context.append(content.strip())
nonlocal sacrifice_id
sacrifice_id += 1
# Append each field to the full context if it exists and is not suppressed
append_context("conditionning")
append_context("documentation", template.system_custom_header("documentation"))
append_context("user_description", template.system_custom_header("user_description"))
append_context("positive_boost", template.system_custom_header("positive_boost"))
append_context("negative_boost", template.system_custom_header("negative_boost"))
append_context("current_language", template.system_custom_header("current_language"))
append_context("fun_mode", template.system_custom_header("fun_mode"))
append_context("think_first_mode", template.system_custom_header("think_first_mode"))
append_context("extra")
found_classic_function = False
for function_call in self.function_calls:
fc:FunctionCall = function_call["class"]
if fc.function_type == FunctionType.CONTEXT_UPDATE:
full_context = fc.update_context(self, full_context)
elif fc.function_type == FunctionType.CLASSIC:
if not found_classic_function:
found_classic_function = True
full_context.append(self.transform_function_to_text(template,function_call))
if found_classic_function:
full_context.append(
template.system_custom_header("Function Calls")+"\n" + "\n".join([
"You have access to functions presented to you in the available functions listed above.",
"If you need to call a function, use this exact syntax:",
"```function",
"{",
' "function_name": "name_of_the_function_to_call",',
' "function_parameters": {',
' "parameter1": value1,',
' "parameter2": value2',
" }",
"}",
"```",
"Important Notes:",
"- **Always** enclose the function call in a `function` markdown code block.",
"- Make sure the content of the function markdown code block is a valid json.",
])
)
append_context("discussion_messages", template.system_custom_header("Discussion")+"\n")
# Add custom entries if provided
if custom_entries:
    # custom_entries may be a single string or a list of strings (build_context passes a list)
    if isinstance(custom_entries, str):
        full_context.append(custom_entries)
    else:
        full_context += custom_entries
# Build the final prompt
prompt = template.separator_template.join(full_context)
# Debugging information
if self.debug and self.model:
nb_prompt_tokens = len(self.model.tokenize(prompt))
nb_tokens = min(
self.ctx_size - nb_prompt_tokens,
self.max_n_predict if self.max_n_predict else self.ctx_size - nb_prompt_tokens
)
ASCIIColors.info(f"Prompt size : {nb_prompt_tokens}")
ASCIIColors.info(f"Requested generation max size : {nb_tokens}")
return prompt
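A minimal sketch of driving the new class directly; build_demo_prompt is a hypothetical helper, and all field values are illustrative (app and client come from a running LoLLMs instance):

```python
from lollms.prompting import LollmsContextDetails


def build_demo_prompt(app, client):
    """Hypothetical helper: assemble a prompt from explicit context fields."""
    details = LollmsContextDetails(
        client=client,
        conditionning=app.template.system_full_header + "You are a helpful assistant.",
        discussion_messages=app.template.user_full_header + "Hello there!",
        ai_prefix="assistant",
        model=app.model,               # enables the debug token report
        ctx_size=app.config.ctx_size,
        debug=True,
    )
    # suppress drops whole sections by name, as answer() does with "ai_prefix"
    return details.build_prompt(app.template, suppress=["fun_mode"])
```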

lollms/server/endpoints/lollms_configuration_infos.py

@@ -17,6 +17,7 @@ from lollms.binding import BindingBuilder, InstallOption
from ascii_colors import ASCIIColors
from lollms.utilities import load_config, trace_exception, gc, show_yes_no_dialog
from lollms.security import check_access
from lollms.templating import LollmsLLMTemplate
from pathlib import Path
from typing import List
import json
@@ -193,6 +194,9 @@ async def apply_settings(request: Request):
lollmsElfServer.verify_servers()
if lollmsElfServer.config.auto_save:
lollmsElfServer.config.save_config()
lollmsElfServer.template = LollmsLLMTemplate(lollmsElfServer.config, lollmsElfServer.personality)
return {"status":True}
except Exception as ex:
trace_exception(ex)

lollms/templating.py (new file, 121 lines)

@@ -0,0 +1,121 @@
class LollmsLLMTemplate:
"""
A template class for managing LLM (Large Language Model) headers, separators, and message templates.
This class provides properties and methods to generate headers and templates for system, user, and AI messages.
"""
def __init__(self, config, personality):
"""
Initialize the LollmsLLMTemplate class.
Parameters:
config (object): A configuration object containing template-related settings.
personality (object): A personality object containing AI-specific settings (e.g., AI name).
"""
self.config = config
self.personality = personality
# Properties ===============================================
@property
def start_header_id_template(self) -> str:
"""Get the start_header_id_template."""
return self.config.start_header_id_template
@property
def end_header_id_template(self) -> str:
"""Get the end_header_id_template."""
return self.config.end_header_id_template
@property
def system_message_template(self) -> str:
"""Get the system_message_template."""
return self.config.system_message_template
@property
def separator_template(self) -> str:
"""Get the separator template."""
return self.config.separator_template
@property
def start_user_header_id_template(self) -> str:
"""Get the start_user_header_id_template."""
return self.config.start_user_header_id_template
@property
def end_user_header_id_template(self) -> str:
"""Get the end_user_header_id_template."""
return self.config.end_user_header_id_template
@property
def end_user_message_id_template(self) -> str:
"""Get the end_user_message_id_template."""
return self.config.end_user_message_id_template
@property
def start_ai_header_id_template(self) -> str:
"""Get the start_ai_header_id_template."""
return self.config.start_ai_header_id_template
@property
def end_ai_header_id_template(self) -> str:
"""Get the end_ai_header_id_template."""
return self.config.end_ai_header_id_template
@property
def end_ai_message_id_template(self) -> str:
"""Get the end_ai_message_id_template."""
return self.config.end_ai_message_id_template
@property
def system_full_header(self) -> str:
"""Generate the full system header."""
return f"{self.start_header_id_template}{self.system_message_template}{self.end_header_id_template}"
@property
def user_full_header(self) -> str:
"""Generate the full user header."""
return f"{self.start_user_header_id_template}{self.config.user_name}{self.end_user_header_id_template}"
@property
def ai_full_header(self) -> str:
"""Generate the full AI header."""
return f"{self.start_ai_header_id_template}{self.personality.name}{self.end_ai_header_id_template}"
# Methods ===============================================
def system_custom_header(self, ai_name: str) -> str:
"""
Generate a custom system header with the specified name.
Parameters:
ai_name (str): The name to include in the header.
Returns:
str: The custom system header.
"""
return f"{self.start_header_id_template}{ai_name}{self.end_user_header_id_template}"
def user_custom_header(self, ai_name: str) -> str:
"""
Generate a custom user header with the specified name.
Parameters:
ai_name (str): The name to include in the header (typically a user name).
Returns:
str: The custom user header.
"""
return f"{self.start_user_header_id_template}{ai_name}{self.end_user_header_id_template}"
def ai_custom_header(self, ai_name: str) -> str:
"""
Generate a custom AI header with the specified AI name.
Parameters:
ai_name (str): The name of the AI to include in the header.
Returns:
str: The custom AI header.
"""
return f"{self.start_ai_header_id_template}{ai_name}{self.end_ai_header_id_template}"
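Since LollmsLLMTemplate only reads plain attributes from config and personality, it can be exercised standalone. A sketch with illustrative stand-in values (the real objects are the application's configuration and active personality; the `!@>` markers are example header tokens, not guaranteed defaults):

```python
from types import SimpleNamespace

from lollms.templating import LollmsLLMTemplate

# Illustrative stand-ins for the config and personality objects.
config = SimpleNamespace(
    start_header_id_template="!@>", end_header_id_template=": ",
    system_message_template="system", separator_template="\n",
    start_user_header_id_template="!@>", end_user_header_id_template=": ",
    end_user_message_id_template="",
    start_ai_header_id_template="!@>", end_ai_header_id_template=": ",
    end_ai_message_id_template="",
    user_name="user",
)
personality = SimpleNamespace(name="lollms")

template = LollmsLLMTemplate(config, personality)
print(repr(template.system_full_header))             # '!@>system: '
print(repr(template.user_full_header))               # '!@>user: '
print(repr(template.ai_custom_header("assistant")))  # '!@>assistant: '
```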