Upgraded code; removed junk stuff

This commit is contained in:
Saifeddine ALOUI 2025-04-04 17:24:47 +02:00
parent 863dfa261b
commit 355c91aaf4
4 changed files with 133 additions and 442 deletions

View File

@ -1,5 +1,14 @@
# =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
version: 160
version: 161
# topbar
current_language: english
# skills library
activate_skills_lib: false # Activate vectorizing previous conversations
skills_lib_database_name: "default" # Default skills database
max_summary_size: 512 # in tokens
# video viewing and news recovering
last_viewed_video: null
@ -22,6 +31,9 @@ turn_on_language_validation: true
force_accept_remote_access: false
# Execution engines parameters
pdf_latex_path: null
# Server information
headless_server_mode: false
allowed_origins: []
@ -73,12 +85,12 @@ personalities: ["generic/lollms"]
active_personality_id: 0
override_personality_model_parameters: false #if true the personality parameters are overridden by those of the configuration (may affect personality behaviour)
extensions: []
# interaction parameters
user_name: user
user_description: ""
use_assistant_name_in_discussion: false
use_user_name_in_discussions: false
use_assistant_name_in_discussion: true
use_user_name_in_discussions: true
use_model_name_in_discussions: false
user_avatar: null
use_user_informations_in_discussion: false
@ -86,166 +98,24 @@ use_user_informations_in_discussion: false
# UI parameters
discussion_db_name: default
# Automatic updates
#automatic stuff
auto_show_browser: true
auto_save: true
auto_title: false
# debug information
debug: false
debug_show_final_full_prompt: false
debug_show_chunks: false
debug_log_file_path: ""
# Automatic updates
auto_update: true
auto_sync_personalities: true
auto_sync_extensions: true
auto_sync_bindings: true
auto_sync_models: true
auto_save: true
auto_title: false
# Install mode (cpu, cpu-noavx, nvidia-tensorcores, nvidia, amd-noavx, amd, apple-intel, apple-silicon)
hardware_mode: nvidia-tensorcores
# Automatically open the browser
auto_show_browser: true
# copy to clipboard
copy_to_clipboard_add_all_details: false
# -------------------- Services global configurations --------------------------
# Select the active text to speech, text to image and speech to text services
active_tts_service: "None" # xtts (offline), openai_tts (API key required), elevenlabs_tts, fish_tts (API key required)
active_tti_service: "None" # autosd (offline), diffusers (offline), diffusers_client (online), dall-e (online), midjourney (online)
active_stt_service: "None" # whisper (offline), asr (offline or online), openai_whisper (API key required)
active_ttm_service: "None" # musicgen (offline)
active_ttv_service: "None" # novita_ai, cog_video_x, diffusers, lumalab (offline)
# -------------------- Services --------------------------
# ***************** STT *****************
stt_input_device: 0
# STT service
stt_listening_threshold: 1000
stt_silence_duration: 2
stt_sound_threshold_percentage: 10
stt_gain: 1.0
stt_rate: 44100
stt_channels: 1
stt_buffer_size: 10
stt_activate_word_detection: false
stt_word_detection_file: null
# ASR STT service
asr_enable: false
asr_base_url: http://localhost:9000
# openai_whisper configuration
openai_whisper_key: ""
openai_whisper_model: "whisper-1"
# whisper configuration
whisper_activate: false
whisper_model: base
# ***************** TTS *****************
tts_output_device: 0
# Voice service
auto_read: false
xtts_current_voice: null
xtts_current_language: en
xtts_stream_chunk_size: 100
xtts_temperature: 0.75
xtts_length_penalty: 1.0
xtts_repetition_penalty: 5.0
xtts_top_k: 50
xtts_top_p: 0.85
xtts_speed: 1
xtts_enable_text_splitting: true
xtts_freq: 22050
# openai_whisper configuration
openai_tts_key: ""
openai_tts_model: "tts-1"
openai_tts_voice: "alloy"
elevenlabs_tts_key: ""
elevenlabs_tts_model_id: "eleven_turbo_v2_5"
elevenlabs_tts_voice_stability: 0.5
elevenlabs_tts_voice_boost: 0.5
elevenlabs_tts_voice_id: EXAVITQu4vr4xnSDxMaL
fish_tts_key: ""
fish_tts_voice: "default"
# ***************** TTI *****************
use_negative_prompt: true
use_ai_generated_negative_prompt: false
negative_prompt_generation_prompt: Generate negative prompt for the following prompt. negative prompt is a set of words that describe things we do not want to have in the generated image.
default_negative_prompt: (((text))), (((ugly))), (((duplicate))), ((morbid)), ((mutilated)), out of frame, extra fingers, mutated hands, ((poorly drawn hands)), ((poorly drawn face)), (((mutation))), (((deformed))), blurry, ((bad anatomy)), (((bad proportions))), ((extra limbs)), cloned face, (((disfigured))), ((extra arms)), (((extra legs))), mutated hands, (fused fingers), (too many fingers), (((long neck))), ((watermark)), ((robot eyes))
# Image generation service
enable_sd_service: false
sd_base_url: http://localhost:7860
# Image generation service
enable_fooocus_service: false
fooocus_base_url: http://localhost:7860
# diffusers
diffusers_offloading_mode: sequential_cpu_offload # sequential_cpu_offload
diffusers_model: v2ray/stable-diffusion-3-medium-diffusers
# diffusers client
diffusers_client_base_url: http://localhost:8593
# Dall e service key
dall_e_key: ""
dall_e_generation_engine: "dall-e-3"
# Midjourney service key
midjourney_key: ""
midjourney_timeout: 300
midjourney_retries: 1
# Image generation service comfyui
enable_comfyui_service: false
comfyui_base_url: http://127.0.0.1:8188/
comfyui_model: v1-5-pruned-emaonly.ckpt
# Motion control service
enable_motion_ctrl_service: false
motion_ctrl_base_url: http://localhost:7861
# ***************** TTV *****************
# Novita_ai configuration
novita_ai_key: ""
cog_video_x_model: "THUDM/CogVideoX-5b"
# lumalabs configuration
lumalabs_key: ""
# ***************** TTT *****************
# ollama service
enable_ollama_service: false
ollama_base_url: http://localhost:11434
# petals service
enable_petals_service: false
petals_base_url: http://localhost:8064
petals_model_path: TinyLlama/TinyLlama-1.1B-Chat-v1.0
petals_device: cuda
# lollms service
enable_lollms_service: false
lollms_access_keys : [] # set a list of keys separated by commas to restrict access
@ -260,25 +130,52 @@ activate_ollama_emulator: true
activate_openai_emulator: true
activate_mistralai_emulator: true
# Install mode (cpu, cpu-noavx, nvidia-tensorcores, nvidia, amd-noavx, amd, apple-intel, apple-silicon)
hardware_mode: nvidia-tensorcores
# copy to clipboard
copy_to_clipboard_add_all_details: false
# -------------------- Services global configurations --------------------------
# Select the active text to speech, text to image and speech to text services
active_tts_service: "None" # xtts (offline), openai_tts (API key required), elevenlabs_tts, fish_tts (API key required)
active_tti_service: "None" # autosd (offline), diffusers (offline), diffusers_client (online), dall-e (online), midjourney (online)
active_stt_service: "None" # whisper (offline), asr (offline or online), openai_whisper (API key required)
active_ttm_service: "None" # musicgen (offline)
active_ttv_service: "None" # novita_ai, cog_video_x, diffusers, lumalab (offline)
# -------------------- Services --------------------------
# ***************** STT *****************
stt_input_device: 0
stt_listening_threshold: 1000
stt_silence_duration: 2
stt_sound_threshold_percentage: 10
stt_gain: 1.0
stt_rate: 44100
stt_channels: 1
stt_buffer_size: 10
stt_activate_word_detection: false
stt_word_detection_file: null
# ***************** TTS *****************
tts_output_device: 0
auto_read: false
# ***************** TTV *****************
# ***************** TTT *****************
# Smart router
use_smart_routing: false
smart_routing_router_model : ""
smart_routing_models_description : {}
restore_model_after_smart_routing : false
# elastic search service
elastic_search_service: false
elastic_search_url: http://localhost:9200
# vllm service
enable_vllm_service: false
vllm_url: http://localhost:8000
vllm_model_path: TinyLlama/TinyLlama-1.1B-Chat-v1.0
vllm_gpu_memory_utilization: 0.9
vllm_max_model_len: 4096
vllm_max_num_seqs: 256
# Audio
media_on: false
audio_in_language: 'en-US'
@ -320,11 +217,6 @@ rag_vectorizer_openai_key: "" # The open ai key (if not provided, this will use
contextual_summary: false #If activated this will completely replace the rag and instead will use contextual summary
activate_skills_lib: false # Activate vectorizing previous conversations
skills_lib_database_name: "default" # Default skills database
max_summary_size: 512 # in tokens
rag_put_chunk_informations_into_context: false # if true then each chunk will be preceded by its information which may waste some context space but allow the ai to point where it found the information
rag_build_keys_words: true # If true, when querying the database, we use keywords generated from the user prompt instead of the prompt itself.
@ -337,13 +229,11 @@ internet_vectorization_nb_chunks: 4 # number of chunks to use
internet_nb_search_pages: 8 # number of pages to select
internet_quick_search: false # If active the search engine will not load and read the webpages
internet_activate_search_decision: false # If active the ai decides by itself if it needs to do search
# Helpers
pdf_latex_path: null
# boosting information
positive_boost: null
negative_boost: null
current_language: english
fun_mode: false
think_first_mode: false
thinking_prompt: "Use a think first process to answer the user:

View File

@ -72,7 +72,6 @@ class LollmsApplication(LoLLMsCom):
self.mounted_personalities = []
self.personality:AIPersonality = None
self.mounted_extensions = []
self.binding = None
self.model:LLMBinding = None
self.long_term_memory = None
@ -180,7 +179,6 @@ class LollmsApplication(LoLLMsCom):
ASCIIColors.warning(f"Couldn't load binding {self.config.binding_name}.")
self.mount_personalities()
self.mount_extensions()
try:
self.load_rag_dbs()
@ -594,56 +592,12 @@ class LollmsApplication(LoLLMsCom):
self.warning(f"Couldn't start lightrag")
ASCIIColors.execute_with_animation("Loading RAG servers", start_local_services,ASCIIColors.color_blue)
tts_services = []
stt_services = []
def start_ttt(*args, **kwargs):
if self.config.enable_ollama_service:
try:
from lollms.services.ttt.ollama.lollms_ollama import Service
self.ollama = Service(self, base_url=self.config.ollama_base_url)
tts_services.append("ollama")
except Exception as ex:
trace_exception(ex)
self.warning(f"Couldn't load Ollama")
if self.config.enable_vllm_service:
try:
from lollms.services.ttt.vllm.lollms_vllm import Service
self.vllm = Service(self, base_url=self.config.vllm_url)
tts_services.append("vllm")
except Exception as ex:
trace_exception(ex)
self.warning(f"Couldn't load vllm")
ASCIIColors.execute_with_animation("Loading TTT services", start_ttt,ASCIIColors.color_blue)
if self.config.active_stt_service:
def start_stt(*args, **kwargs):
self.stt = self.load_service_from_folder(self.lollms_paths.services_zoo_path/"stt", self.config.active_stt_service)
ASCIIColors.execute_with_animation("Loading loacal STT services", start_stt, ASCIIColors.color_blue)
# def start_tts(*args, **kwargs):
# if self.config.active_tts_service == "xtts":
# ASCIIColors.yellow("Loading XTTS")
# try:
# from lollms.services.tts.xtts.lollms_xtts import LollmsXTTS
# self.tts = LollmsXTTS(
# self
# )
# except Exception as ex:
# trace_exception(ex)
# self.warning(f"Couldn't load XTTS")
# if self.config.active_tts_service == "eleven_labs_tts":
# from lollms.services.tts.eleven_labs_tts.lollms_eleven_labs_tts import LollmsElevenLabsTTS
# self.tts = LollmsElevenLabsTTS(self)
# elif self.config.active_tts_service == "openai_tts":
# from lollms.services.tts.open_ai_tts.lollms_openai_tts import LollmsOpenAITTS
# self.tts = LollmsOpenAITTS(self)
# elif self.config.active_tts_service == "fish_tts":
# from lollms.services.tts.fish.lollms_fish_tts import LollmsFishAudioTTS
# self.tts = LollmsFishAudioTTS(self)
if self.config.active_tts_service:
def start_tts(*args, **kwargs):
self.tti = self.load_service_from_folder(self.lollms_paths.services_zoo_path/"tts", self.config.active_tts_service)
@ -1004,31 +958,11 @@ class LollmsApplication(LoLLMsCom):
self.config.active_personality_id = 0
self.personality = self.mounted_personalities[self.config.active_personality_id]
def mount_extensions(self, callback = None):
self.mounted_extensions = []
to_remove = []
for i in range(len(self.config["extensions"])):
p = self.mount_extension(i, callback = None)
if p is None:
to_remove.append(i)
to_remove.sort(reverse=True)
for i in to_remove:
self.unmount_extension(i)
def set_personalities_callbacks(self, callback: Callable[[str, int, dict], bool]=None):
for personality in self.mount_personalities:
personality.setCallback(callback)
def unmount_extension(self, id:int)->bool:
if id<len(self.config.extensions):
del self.config.extensions[id]
if id>=0 and id<len(self.mounted_extensions):
del self.mounted_extensions[id]
self.config.save_config()
return True
else:
return False
def unmount_personality(self, id:int)->bool:
@ -1183,30 +1117,7 @@ class LollmsApplication(LoLLMsCom):
current_message = messages[message_index]
# Build the conditionning text block
default_language = self.personality.language.lower().strip().split()[0]
current_language = self.config.current_language.lower().strip().split()[0]
if current_language and current_language!= self.personality.language:
language_path = self.lollms_paths.personal_configuration_path/"personalities"/self.personality.name/f"languages_{current_language}.yaml"
if not language_path.exists():
self.info(f"This is the first time this personality speaks {current_language}\nLollms is reconditionning the persona in that language.\nThis will be done just once. Next time, the personality will speak {current_language} out of the box")
language_path.parent.mkdir(exist_ok=True, parents=True)
# Translating
conditionning = self.tasks_library.translate_conditionning(self.personality._personality_conditioning, self.personality.language, current_language)
welcome_message = self.tasks_library.translate_message(self.personality.welcome_message, self.personality.language, current_language)
with open(language_path,"w",encoding="utf-8", errors="ignore") as f:
yaml.safe_dump({"personality_conditioning":conditionning,"welcome_message":welcome_message}, f)
else:
with open(language_path,"r",encoding="utf-8", errors="ignore") as f:
language_pack = yaml.safe_load(f)
conditionning = language_pack.get("personality_conditioning", language_pack.get("conditionning", self.personality.personality_conditioning))
else:
conditionning = self.personality._personality_conditioning
if len(conditionning)>0:
if type(conditionning) is list:
conditionning = "\n".join(conditionning)
conditionning = self.system_full_header + conditionning + ("" if conditionning[-1]==self.separator_template else self.separator_template)
conditionning = self.personality.personality_conditioning
block_rag = False
function_calls = []

View File

@ -1,5 +1,14 @@
# =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
version: 160
version: 161
# topbar
current_language: english
# skills library
activate_skills_lib: false # Activate vectorizing previous conversations
skills_lib_database_name: "default" # Default skills database
max_summary_size: 512 # in tokens
# video viewing and news recovering
last_viewed_video: null
@ -22,6 +31,9 @@ turn_on_language_validation: true
force_accept_remote_access: false
# Execution engines parameters
pdf_latex_path: null
# Server information
headless_server_mode: false
allowed_origins: []
@ -73,12 +85,12 @@ personalities: ["generic/lollms"]
active_personality_id: 0
override_personality_model_parameters: false #if true the personality parameters are overridden by those of the configuration (may affect personality behaviour)
extensions: []
# interaction parameters
user_name: user
user_description: ""
use_assistant_name_in_discussion: false
use_user_name_in_discussions: false
use_assistant_name_in_discussion: true
use_user_name_in_discussions: true
use_model_name_in_discussions: false
user_avatar: null
use_user_informations_in_discussion: false
@ -86,166 +98,24 @@ use_user_informations_in_discussion: false
# UI parameters
discussion_db_name: default
# Automatic updates
#automatic stuff
auto_show_browser: true
auto_save: true
auto_title: false
# debug information
debug: false
debug_show_final_full_prompt: false
debug_show_chunks: false
debug_log_file_path: ""
# Automatic updates
auto_update: true
auto_sync_personalities: true
auto_sync_extensions: true
auto_sync_bindings: true
auto_sync_models: true
auto_save: true
auto_title: false
# Install mode (cpu, cpu-noavx, nvidia-tensorcores, nvidia, amd-noavx, amd, apple-intel, apple-silicon)
hardware_mode: nvidia-tensorcores
# Automatically open the browser
auto_show_browser: true
# copy to clipboard
copy_to_clipboard_add_all_details: false
# -------------------- Services global configurations --------------------------
# Select the active text to speech, text to image and speech to text services
active_tts_service: "None" # xtts (offline), openai_tts (API key required), elevenlabs_tts, fish_tts (API key required)
active_tti_service: "None" # autosd (offline), diffusers (offline), diffusers_client (online), dall-e (online), midjourney (online)
active_stt_service: "None" # whisper (offline), asr (offline or online), openai_whisper (API key required)
active_ttm_service: "None" # musicgen (offline)
active_ttv_service: "None" # novita_ai, cog_video_x, diffusers, lumalab (offline)
# -------------------- Services --------------------------
# ***************** STT *****************
stt_input_device: 0
# STT service
stt_listening_threshold: 1000
stt_silence_duration: 2
stt_sound_threshold_percentage: 10
stt_gain: 1.0
stt_rate: 44100
stt_channels: 1
stt_buffer_size: 10
stt_activate_word_detection: false
stt_word_detection_file: null
# ASR STT service
asr_enable: false
asr_base_url: http://localhost:9000
# openai_whisper configuration
openai_whisper_key: ""
openai_whisper_model: "whisper-1"
# whisper configuration
whisper_activate: false
whisper_model: base
# ***************** TTS *****************
tts_output_device: 0
# Voice service
auto_read: false
xtts_current_voice: null
xtts_current_language: en
xtts_stream_chunk_size: 100
xtts_temperature: 0.75
xtts_length_penalty: 1.0
xtts_repetition_penalty: 5.0
xtts_top_k: 50
xtts_top_p: 0.85
xtts_speed: 1
xtts_enable_text_splitting: true
xtts_freq: 22050
# openai_whisper configuration
openai_tts_key: ""
openai_tts_model: "tts-1"
openai_tts_voice: "alloy"
elevenlabs_tts_key: ""
elevenlabs_tts_model_id: "eleven_turbo_v2_5"
elevenlabs_tts_voice_stability: 0.5
elevenlabs_tts_voice_boost: 0.5
elevenlabs_tts_voice_id: EXAVITQu4vr4xnSDxMaL
fish_tts_key: ""
fish_tts_voice: "default"
# ***************** TTI *****************
use_negative_prompt: true
use_ai_generated_negative_prompt: false
negative_prompt_generation_prompt: Generate negative prompt for the following prompt. negative prompt is a set of words that describe things we do not want to have in the generated image.
default_negative_prompt: (((text))), (((ugly))), (((duplicate))), ((morbid)), ((mutilated)), out of frame, extra fingers, mutated hands, ((poorly drawn hands)), ((poorly drawn face)), (((mutation))), (((deformed))), blurry, ((bad anatomy)), (((bad proportions))), ((extra limbs)), cloned face, (((disfigured))), ((extra arms)), (((extra legs))), mutated hands, (fused fingers), (too many fingers), (((long neck))), ((watermark)), ((robot eyes))
# Image generation service
enable_sd_service: false
sd_base_url: http://localhost:7860
# Image generation service
enable_fooocus_service: false
fooocus_base_url: http://localhost:7860
# diffusers
diffusers_offloading_mode: sequential_cpu_offload # sequential_cpu_offload
diffusers_model: v2ray/stable-diffusion-3-medium-diffusers
# diffusers client
diffusers_client_base_url: http://localhost:8593
# Dall e service key
dall_e_key: ""
dall_e_generation_engine: "dall-e-3"
# Midjourney service key
midjourney_key: ""
midjourney_timeout: 300
midjourney_retries: 1
# Image generation service comfyui
enable_comfyui_service: false
comfyui_base_url: http://127.0.0.1:8188/
comfyui_model: v1-5-pruned-emaonly.ckpt
# Motion control service
enable_motion_ctrl_service: false
motion_ctrl_base_url: http://localhost:7861
# ***************** TTV *****************
# Novita_ai configuration
novita_ai_key: ""
cog_video_x_model: "THUDM/CogVideoX-5b"
# lumalabs configuration
lumalabs_key: ""
# ***************** TTT *****************
# ollama service
enable_ollama_service: false
ollama_base_url: http://localhost:11434
# petals service
enable_petals_service: false
petals_base_url: http://localhost:8064
petals_model_path: TinyLlama/TinyLlama-1.1B-Chat-v1.0
petals_device: cuda
# lollms service
enable_lollms_service: false
lollms_access_keys : [] # set a list of keys separated by commas to restrict access
@ -260,25 +130,52 @@ activate_ollama_emulator: true
activate_openai_emulator: true
activate_mistralai_emulator: true
# Install mode (cpu, cpu-noavx, nvidia-tensorcores, nvidia, amd-noavx, amd, apple-intel, apple-silicon)
hardware_mode: nvidia-tensorcores
# copy to clipboard
copy_to_clipboard_add_all_details: false
# -------------------- Services global configurations --------------------------
# Select the active test to speach, text to image and speach to text services
active_tts_service: "None" # xtts (offline), openai_tts (API key required), elevenlabs_tts, fish_tts (API key required)
active_tti_service: "None" # autosd (offline), diffusers (offline), diffusers_client (online), dall-e (online), midjourney (online)
active_stt_service: "None" # whisper (offline), asr (offline or online), openai_whisper (API key required)
active_ttm_service: "None" # musicgen (offline)
active_ttv_service: "None" # novita_ai, cog_video_x, diffusers, lumalab (offline)
# -------------------- Services --------------------------
# ***************** STT *****************
stt_input_device: 0
stt_listening_threshold: 1000
stt_silence_duration: 2
stt_sound_threshold_percentage: 10
stt_gain: 1.0
stt_rate: 44100
stt_channels: 1
stt_buffer_size: 10
stt_activate_word_detection: false
stt_word_detection_file: null
# ***************** TTS *****************
tts_output_device: 0
auto_read: false
# ***************** TTV *****************
# ***************** TTT *****************
# Smart router
use_smart_routing: false
smart_routing_router_model : ""
smart_routing_models_description : {}
restore_model_after_smart_routing : false
# elastic search service
elastic_search_service: false
elastic_search_url: http://localhost:9200
# vllm service
enable_vllm_service: false
vllm_url: http://localhost:8000
vllm_model_path: TinyLlama/TinyLlama-1.1B-Chat-v1.0
vllm_gpu_memory_utilization: 0.9
vllm_max_model_len: 4096
vllm_max_num_seqs: 256
# Audio
media_on: false
audio_in_language: 'en-US'
@ -320,11 +217,6 @@ rag_vectorizer_openai_key: "" # The open ai key (if not provided, this will use
contextual_summary: false #If activated this will completely replace the rag and instead will use contextual summary
activate_skills_lib: false # Activate vectorizing previous conversations
skills_lib_database_name: "default" # Default skills database
max_summary_size: 512 # in tokens
rag_put_chunk_informations_into_context: false # if true then each chunk will be preceded by its information which may waste some context space but allow the ai to point where it found the information
rag_build_keys_words: true # If true, when querying the database, we use keywords generated from the user prompt instead of the prompt itself.
@ -337,13 +229,11 @@ internet_vectorization_nb_chunks: 4 # number of chunks to use
internet_nb_search_pages: 8 # number of pages to select
internet_quick_search: false # If active the search engine will not load and read the webpages
internet_activate_search_decision: false # If active the ai decides by itself if it needs to do search
# Helpers
pdf_latex_path: null
# boosting information
positive_boost: null
negative_boost: null
current_language: english
fun_mode: false
think_first_mode: false
thinking_prompt: "Use a think first process to answer the user:

View File

@ -145,7 +145,7 @@ class LollmsContextDetails:
sacrifice_id += 1
# Append each field to the full context if it exists and is not suppressed
append_context("conditionning")
append_context("conditionning", template.system_full_header)
append_context("documentation", template.system_custom_header("documentation"))
append_context("internet_search_results", template.system_custom_header("Internet search results"))
append_context("user_description")
@ -156,7 +156,7 @@ class LollmsContextDetails:
append_context("think_first_mode")
append_context("extra")
append_context("discussion_messages", template.system_custom_header("Discussion")+"\n")
append_context("discussion_messages", template.system_custom_header("discussion")+"\n")
found_classic_function = False
if not ignore_function_calls: