New upgrades to language management

Saifeddine ALOUI 2024-11-04 01:22:11 +01:00
parent 7ad456806e
commit b2c9f73a3d
3 changed files with 442 additions and 75 deletions


@@ -31,7 +31,7 @@ import yaml
import time
from lollms.utilities import PackageManager
import socket
import shutil
class LollmsApplication(LoLLMsCom):
    def __init__(
        self,
@@ -803,7 +803,11 @@ class LollmsApplication(LoLLMsCom):
    #languages:
    def get_personality_languages(self):
-        languages = []
+        languages = [self.personality.default_language]
+        persona_language_path = self.lollms_paths.personalities_zoo_path/self.personality.category/self.personality.name.replace(" ","_")/"languages"
+        for language_file in persona_language_path.glob("*.yaml"):
+            language_code = language_file.stem
+            languages.append(language_code)
        # Build the path to the folder that holds the language files of the current personality
        languages_dir = self.lollms_paths.personal_configuration_path / "personalities" / self.personality.name
        if self.personality.language:
@@ -822,10 +826,10 @@ class LollmsApplication(LoLLMsCom):
                else:
                    language_code = parts[-1]
-                if language_code != default_language:
+                if language_code != default_language and language_code not in languages:
                    languages.append(language_code)
-        return [default_language] + languages
+        return languages
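
For illustration only (not part of the commit), the sketch below summarizes the updated discovery logic: the default language comes first, translations shipped in the personality's zoo "languages" folder are added next, and locally generated languages_<code>.yaml files are merged in without duplicates. The paths and the helper name are placeholders.

# Standalone sketch, not taken from the commit. Paths and the helper name are placeholders.
from pathlib import Path

def list_personality_languages(default_language, zoo_languages_dir: Path, personal_config_dir: Path):
    languages = [default_language]                      # default language always comes first
    if zoo_languages_dir.exists():                      # translations shipped with the personality
        for language_file in zoo_languages_dir.glob("*.yaml"):
            if language_file.stem not in languages:
                languages.append(language_file.stem)
    if personal_config_dir.exists():                    # translations generated locally by the user
        for language_file in personal_config_dir.glob("languages_*.yaml"):
            code = language_file.stem.split("_")[-1]
            if code != default_language and code not in languages:
                languages.append(code)
    return languages

print(list_personality_languages("english", Path("zoo_persona/languages"), Path("configs/persona")))
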
@@ -833,24 +837,8 @@ class LollmsApplication(LoLLMsCom):
        if language is None or language == "":
            return False
        language = language.lower().strip().split()[0]
-        # Build the conditionning text block
-        default_language = self.personality.language.lower().strip().split()[0]
-        if language!= default_language:
-            language_path = self.lollms_paths.personal_configuration_path/"personalities"/self.personality.name/f"languages_{language}.yaml"
-            if not language_path.exists():
-                self.ShowBlockingMessage(f"This is the first time this personality speaks {language}\nLollms is reconditionning the persona in that language.\nThis will be done just once. Next time, the personality will speak {language} out of the box")
-                language_path.parent.mkdir(exist_ok=True, parents=True)
-                # Translating
-                conditionning = self.tasks_library.translate_conditionning(self.personality._personality_conditioning, self.personality.language, language)
-                welcome_message = self.tasks_library.translate_message(self.personality.welcome_message, self.personality.language, language)
-                with open(language_path,"w",encoding="utf-8", errors="ignore") as f:
-                    yaml.safe_dump({"conditionning":conditionning,"welcome_message":welcome_message}, f)
-                self.HideBlockingMessage()
-            else:
-                with open(language_path,"r",encoding="utf-8", errors="ignore") as f:
-                    language_pack = yaml.safe_load(f)
-                    conditionning = language_pack["conditionning"]
+        self.personality.set_language(language)
        self.config.current_language=language
        self.config.save_config()
        return True
@@ -934,11 +922,11 @@ class LollmsApplication(LoLLMsCom):
                    conditionning = self.tasks_library.translate_conditionning(self.personality._personality_conditioning, self.personality.language, current_language)
                    welcome_message = self.tasks_library.translate_message(self.personality.welcome_message, self.personality.language, current_language)
                    with open(language_path,"w",encoding="utf-8", errors="ignore") as f:
-                        yaml.safe_dump({"conditionning":conditionning,"welcome_message":welcome_message}, f)
+                        yaml.safe_dump({"personality_conditioning":conditionning,"welcome_message":welcome_message}, f)
                else:
                    with open(language_path,"r",encoding="utf-8", errors="ignore") as f:
                        language_pack = yaml.safe_load(f)
-                        conditionning = language_pack["conditionning"]
+                        conditionning = language_pack["personality_conditioning"]
            else:
                conditionning = self.personality._personality_conditioning
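
Since the cached key changes from "conditionning" to "personality_conditioning", a language pack written by the new code looks roughly like the sketch below. The file name and values are made up; only the keys mirror the commit.

# Illustrative sketch: write and read a cached language pack with the renamed key.
import yaml

pack = {
    "personality_conditioning": "You are a helpful assistant and you answer in French.",
    "welcome_message": "Bonjour ! Comment puis-je vous aider ?",
}
with open("languages_french.yaml", "w", encoding="utf-8") as f:
    yaml.safe_dump(pack, f)

with open("languages_french.yaml", "r", encoding="utf-8") as f:
    language_pack = yaml.safe_load(f)
print(language_pack["personality_conditioning"])
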


@@ -22,6 +22,7 @@ import pkg_resources
from pathlib import Path
from PIL import Image
import re
+import shutil
from datetime import datetime
import importlib
@@ -163,6 +164,8 @@ class AIPersonality:
        self._category: str = "General"
        self._category_desc: str = "General"
        self._language: str = "english"
+        self._default_language: str = "english"
        self._supported_languages: str = []
        self._selected_language: str = selected_language
        self._ignore_discussion_documents_rag:bool = ignore_discussion_documents_rag
@@ -171,6 +174,7 @@ class AIPersonality:
        # Conditionning
        self._personality_description: str = "This personality is a helpful and Kind AI ready to help you solve your problems"
        self._personality_conditioning: str = "\n".join([
@@ -692,6 +696,309 @@ class AIPersonality:
        return gen
def generate_codes(self, prompt, max_size = None, temperature = None, top_k = None, top_p=None, repeat_penalty=None, repeat_last_n=None, callback=None, debug=False ):
if len(self.image_files)>0:
response = self.generate_with_images(self.system_custom_header("Generation infos")+ "Generated code must be put inside the adequate markdown code tag. Use this template:\n```language name\nCode\n```\n" + self.separator_template + prompt, self.image_files, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
else:
response = self.generate(self.system_custom_header("Generation infos")+ "Generated code must be put inside the adequate markdown code tag. Use this template:\n```language name\nCode\n```\n" + self.separator_template + prompt, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
codes = self.extract_code_blocks(response)
return codes
def generate_code(self, prompt, images=[], max_size = None, temperature = None, top_k = None, top_p=None, repeat_penalty=None, repeat_last_n=None, callback=None, debug=False, return_full_generated_code=False, accept_all_if_no_code_tags_is_present=False, max_continues=5):
response_full = ""
full_prompt = self.system_custom_header("Generation infos")+ "Generated code must be put inside the adequate markdown code tag. Use this template:\n```language name\nCode\n```\nMake sure only a single code tag is generated at each dialogue turn." + self.separator_template + self.system_custom_header("User prompt")+ prompt + self.separator_template + self.ai_custom_header("generated code")
if len(self.image_files)>0:
response = self.generate_with_images(full_prompt, self.image_files, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
elif len(images)>0:
response = self.generate_with_images(full_prompt, images, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
else:
response = self.generate(full_prompt, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
response_full += response
codes = self.extract_code_blocks(response)
if len(codes)==0 and accept_all_if_no_code_tags_is_present:
if return_full_generated_code:
return response, response_full
else:
return response
if len(codes)>0:
if not codes[-1]["is_complete"]:
code = "\n".join(codes[-1]["content"].split("\n")[:-1])
nb_continues = 0
while not codes[-1]["is_complete"] and nb_continues<max_continues:
response = self.generate(full_prompt+code+self.user_full_header+"continue the code. Start from last line and continue the code. Put the code inside a markdown code tag."+self.separator_template+self.ai_full_header, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
response_full += response
codes = self.extract_code_blocks(response)
if len(codes)==0:
break
else:
if not codes[-1]["is_complete"]:
code +="\n"+ "\n".join(codes[-1]["content"].split("\n")[:-1])
else:
code +="\n"+ "\n".join(codes[-1]["content"].split("\n"))
nb_continues += 1
else:
code = codes[-1]["content"]
if return_full_generated_code:
return code, response_full
else:
return code
else:
return None
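
As a usage sketch (assuming `personality` is an already loaded AIPersonality instance; the prompt and parameter values are made up):

# Hypothetical usage; `personality` stands for a loaded AIPersonality instance.
code = personality.generate_code(
    "Write a Python function that returns the nth Fibonacci number.",
    accept_all_if_no_code_tags_is_present=True,  # fall back to the raw answer if no code tag is emitted
    max_continues=3,                             # cap the number of continuation rounds for truncated blocks
)
if code is not None:
    print(code)
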
def generate_text(self, prompt, images=[], max_size = None, temperature = None, top_k = None, top_p=None, repeat_penalty=None, repeat_last_n=None, callback=None, debug=False, return_full_generated_code=False, accept_all_if_no_code_tags_is_present=False):
response_full = ""
full_prompt = self.system_custom_header("Generation infos")+ "Generated text content must be put inside a markdown code tag. Use this template:\n```\nText\n```\nMake sure only a single text tag is generated at each dialogue turn." + self.separator_template + self.system_custom_header("User prompt")+ prompt + self.separator_template + self.ai_custom_header("generated answer")
if len(self.image_files)>0:
response = self.generate_with_images(full_prompt, self.image_files, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
elif len(images)>0:
response = self.generate_with_images(full_prompt, images, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
else:
response = self.generate(full_prompt, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
response_full += response
codes = self.extract_code_blocks(response)
if len(codes)==0 and accept_all_if_no_code_tags_is_present:
if return_full_generated_code:
return response, response_full
else:
return response
if len(codes)>0:
if not codes[-1]["is_complete"]:
code = "\n".join(codes[-1]["content"].split("\n")[:-1])
while not codes[-1]["is_complete"]:
response = self.generate(prompt+code+self.user_full_header+"continue the text. Start from last line and continue the text. Put the text inside a markdown code tag."+self.separator_template+self.ai_full_header, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
response_full += response
codes = self.extract_code_blocks(response)
if len(codes)==0:
break
else:
if not codes[-1]["is_complete"]:
code +="\n"+ "\n".join(codes[-1]["content"].split("\n")[:-1])
else:
code +="\n"+ "\n".join(codes[-1]["content"].split("\n"))
else:
code = codes[-1]["content"]
if return_full_generated_code:
return code, response_full
else:
return code
else:
return None
def generate_structured_content(self,
prompt,
template,
single_shot=False,
output_format="yaml"):
"""
Generate structured content (YAML/JSON) either in single-shot or step-by-step mode.
Args:
prompt (str): The main prompt describing what to generate
template (dict): Dictionary containing the structure and field-specific prompts
single_shot (bool): If True, generates all content at once. If False, generates field by field
output_format (str): "yaml" or "json"
Returns:
dict: Contains both the structured data and formatted string
"""
# Initialize the output dictionary with default values
output_data = {}
for field, field_info in template.items():
output_data[field] = field_info.get("default", "")
if single_shot:
# Generate all content at once for powerful LLMs
full_prompt = f"""Generate {output_format.upper()} content for: {prompt}
Use this structure:
{output_data}
"""
if self.config.debug and not self.processor:
ASCIIColors.highlight(full_prompt,"source_document_title", ASCIIColors.color_yellow, ASCIIColors.color_red, False)
response = self.generate_code(full_prompt, callback=self.sink, accept_all_if_no_code_tags_is_present=True)
# Parse the response based on format
if output_format == "yaml":
import yaml
try:
cleaned_response = response.replace("```yaml", "").replace("```", "").strip()
output_data = yaml.safe_load(cleaned_response)
except yaml.YAMLError:
# If parsing fails, fall back to step-by-step
single_shot = False
elif output_format == "json":
import json
try:
cleaned_response = response.replace("```json", "").replace("```", "").strip()
output_data = json.loads(cleaned_response)
except json.JSONDecodeError:
# If parsing fails, fall back to step-by-step
single_shot = False
if not single_shot:
# Generate each field individually
for field, field_info in template.items():
if "prompt" in field_info:
field_prompt = field_info["prompt"].format(main_prompt=prompt)
response = self.generate_code(field_prompt, callback=self.sink, accept_all_if_no_code_tags_is_present=True )
# Clean up the response
cleaned_response = response.strip()
# Apply any field-specific processing
if "processor" in field_info:
cleaned_response = field_info["processor"](cleaned_response)
output_data[field] = cleaned_response
# Format the output string
if output_format == "yaml":
formatted_string = ""
for key, value in output_data.items():
if isinstance(value, str) and ("\n" in value or len(value) > 40):
v = value.replace('\n', '\n ')
formatted_string += f"{key}: |\n {v}\n"
else:
formatted_string += f"{key}: {value}\n"
else: # json
formatted_string = json.dumps(output_data, indent=2)
return {
"data": output_data,
"formatted_string": formatted_string
}
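
A usage sketch of the template format the method expects: each field may carry a "default" value, a "prompt" containing a {main_prompt} placeholder, and an optional "processor" callable. `personality`, the field names, and the prompts are placeholders, not part of the commit.

# Hypothetical usage; `personality` stands for a loaded AIPersonality instance.
template = {
    "title":   {"default": "", "prompt": "Generate a short title for: {main_prompt}"},
    "summary": {"default": "", "prompt": "Write a one paragraph summary for: {main_prompt}"},
    "tags":    {"default": [], "prompt": "List five comma separated tags for: {main_prompt}",
                "processor": lambda text: [t.strip() for t in text.split(",")]},
}
result = personality.generate_structured_content(
    "an article about edge computing",
    template,
    single_shot=False,       # generate field by field for smaller models
    output_format="yaml",
)
print(result["data"]["title"])
print(result["formatted_string"])
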
def extract_code_blocks(self, text: str, return_remaining_text: bool = False) -> Union[List[dict], Tuple[List[dict], str]]:
"""
This function extracts code blocks from a given text and optionally returns the text without code blocks.
Parameters:
text (str): The text from which to extract code blocks. Code blocks are identified by triple backticks (```).
return_remaining_text (bool): If True, also returns the text with code blocks removed.
Returns:
Union[List[dict], Tuple[List[dict], str]]:
- If return_remaining_text is False: Returns only the list of code block dictionaries
- If return_remaining_text is True: Returns a tuple containing:
* List of code block dictionaries
* String containing the text with all code blocks removed
Each code block dictionary contains:
- 'index' (int): The index of the code block in the text
- 'file_name' (str): The name of the file extracted from the preceding line, if available
- 'section' (str): The section name extracted from a preceding <section> tag, if available
- 'content' (str): The content of the code block
- 'type' (str): The type of the code block
- 'is_complete' (bool): True if the block has a closing tag, False otherwise
"""
remaining = text
bloc_index = 0
first_index = 0
indices = []
text_without_blocks = text
# Find all code block delimiters
while len(remaining) > 0:
try:
index = remaining.index("```")
indices.append(index + first_index)
remaining = remaining[index + 3:]
first_index += index + 3
bloc_index += 1
except Exception as ex:
if bloc_index % 2 == 1:
index = len(remaining)
indices.append(index)
remaining = ""
code_blocks = []
is_start = True
# Process code blocks and build text without blocks if requested
if return_remaining_text:
text_parts = []
last_end = 0
for index, code_delimiter_position in enumerate(indices):
if is_start:
block_infos = {
'index': len(code_blocks),
'file_name': "",
'section': "",
'content': "",
'type': "",
'is_complete': False
}
# Store text before code block if returning remaining text
if return_remaining_text:
text_parts.append(text[last_end:code_delimiter_position].strip())
# Check the preceding line for file name
preceding_text = text[:code_delimiter_position].strip().splitlines()
if preceding_text:
last_line = preceding_text[-1].strip()
if last_line.startswith("<file_name>") and last_line.endswith("</file_name>"):
file_name = last_line[len("<file_name>"):-len("</file_name>")].strip()
block_infos['file_name'] = file_name
elif last_line.startswith("## filename:"):
file_name = last_line[len("## filename:"):].strip()
block_infos['file_name'] = file_name
if last_line.startswith("<section>") and last_line.endswith("</section>"):
section = last_line[len("<section>"):-len("</section>")].strip()
block_infos['section'] = section
sub_text = text[code_delimiter_position + 3:]
if len(sub_text) > 0:
try:
find_space = sub_text.index(" ")
except:
find_space = int(1e10)
try:
find_return = sub_text.index("\n")
except:
find_return = int(1e10)
next_index = min(find_return, find_space)
if '{' in sub_text[:next_index]:
next_index = 0
start_pos = next_index
if code_delimiter_position + 3 < len(text) and text[code_delimiter_position + 3] in ["\n", " ", "\t"]:
block_infos["type"] = 'language-specific'
else:
block_infos["type"] = sub_text[:next_index]
if index + 1 < len(indices):
next_pos = indices[index + 1] - code_delimiter_position
if next_pos - 3 < len(sub_text) and sub_text[next_pos - 3] == "`":
block_infos["content"] = sub_text[start_pos:next_pos - 3].strip()
block_infos["is_complete"] = True
else:
block_infos["content"] = sub_text[start_pos:next_pos].strip()
block_infos["is_complete"] = False
if return_remaining_text:
last_end = indices[index + 1] + 3
else:
block_infos["content"] = sub_text[start_pos:].strip()
block_infos["is_complete"] = False
if return_remaining_text:
last_end = len(text)
code_blocks.append(block_infos)
is_start = False
else:
is_start = True
if return_remaining_text:
# Add any remaining text after the last code block
if last_end < len(text):
text_parts.append(text[last_end:].strip())
# Join all non-code parts with newlines
text_without_blocks = '\n'.join(filter(None, text_parts))
return code_blocks, text_without_blocks
return code_blocks
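
A small usage sketch of the parser; the response string is made up and `personality` stands for a loaded AIPersonality instance.

# Hypothetical usage; `personality` stands for a loaded AIPersonality instance.
response = "Here is the code:\n```python\nprint('hello')\n```\nDone."
blocks, remaining = personality.extract_code_blocks(response, return_remaining_text=True)
for block in blocks:
    print(block["type"], block["is_complete"])  # -> python True
    print(block["content"])                     # -> print('hello')
print(remaining)                                # -> the surrounding text with the code block removed
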
    def process(self, text:str, message_type:MSG_OPERATION_TYPE, callback=None, show_progress=False):
@@ -783,6 +1090,91 @@ class AIPersonality:
    def __str__(self):
        return f"{self.category}/{self.name}"
def set_config(self, config, is_default_language= True):
# Load parameters from the configuration file
self._version = config.get("version", self._version)
self._author = config.get("author", self._author)
self._name = config.get("name", self._name)
self._creation_date = config.get("creation_date", self._creation_date)
self._last_update_date = config.get("last_update_date", self._last_update_date)
self._user_name = config.get("user_name", self._user_name)
self._category_desc = config.get("category", self._category)
self._language = config.get("language", self._language)
if is_default_language:
self._default_language = config.get("language", self._language)
self._ignore_discussion_documents_rag = config.get("ignore_discussion_documents_rag", self._ignore_discussion_documents_rag)
self._personality_description = config.get("personality_description", self._personality_description)
self._personality_conditioning = config.get("personality_conditioning", self._personality_conditioning)
self._prompts_list = config.get("prompts_list", self._prompts_list)
self._welcome_message = config.get("welcome_message", self._welcome_message)
self._include_welcome_message_in_discussion = config.get("include_welcome_message_in_discussion", self._include_welcome_message_in_discussion)
self._user_message_prefix = config.get("user_message_prefix", self._user_message_prefix)
self._link_text = config.get("link_text", self._link_text)
self._ai_message_prefix = config.get("ai_message_prefix", self._ai_message_prefix)
self._dependencies = config.get("dependencies", self._dependencies)
self._disclaimer = config.get("disclaimer", self._disclaimer)
self._help = config.get("help", self._help)
self._commands = config.get("commands", self._commands)
self._model_temperature = config.get("model_temperature", self._model_temperature)
self._model_top_k = config.get("model_top_k", self._model_top_k)
self._model_top_p = config.get("model_top_p", self._model_top_p)
self._model_repeat_penalty = config.get("model_repeat_penalty", self._model_repeat_penalty)
self._model_repeat_last_n = config.get("model_repeat_last_n", self._model_repeat_last_n)
# Script parameters (for example keys to connect to search engine or any other usage)
self._processor_cfg = config.get("processor_cfg", self._processor_cfg)
def set_language(self, language):
if self.language.lower().strip() and self.language.lower().strip()!= language:
language_path = self.app.lollms_paths.personal_configuration_path/"personalities"/self.name/f"languages_{language}.yaml"
if not language_path.exists():
#checking if there is already a translation in the personality folder
persona_language_path = self.lollms_paths.personalities_zoo_path/self.category/self.name.replace(" ","_")/"languages"/f"{language}.yaml"
if persona_language_path.exists():
shutil.copy(persona_language_path, language_path)
with open(language_path,"r",encoding="utf-8", errors="ignore") as f:
config = yaml.safe_load(f)
self.set_config(config, False)
else:
# this is a new language
try:
self.ShowBlockingMessage(f"This is the first time this personality speaks {language}\nLollms is reconditioning the persona in that language.\nThis will be done just once. Next time, the personality will speak {language} out of the box")
language_path.parent.mkdir(exist_ok=True, parents=True)
package_path = self.personality_package_path
# Verify that there is at least a configuration file
config_file = package_path / "config.yaml"
if not config_file.exists():
raise ValueError(f"The provided folder {package_path} does not exist.")
with open(config_file, "r", encoding='utf-8') as f:
default_config = f.read()
# Translating
new_config = self.generate_code(f"Translate the following yaml file values to {language}.\n```yaml\n{default_config}```\n")
with open(language_path,"w",encoding="utf-8", errors="ignore") as f:
f.write(new_config)
with open(language_path,"r",encoding="utf-8", errors="ignore") as f:
new_config = yaml.safe_load(f)
self.set_config(new_config)
self.HideBlockingMessage()
except Exception as ex:
trace_exception(ex)
self.InfoMessage(f"Couldn't translate personality to {language}.\nThe model you are using may be unable to do this task. We'll switch to conditioning language insertion mode.")
else:
with open(language_path,"r",encoding="utf-8", errors="ignore") as f:
config = yaml.safe_load(f)
self.set_config(config, False)
else:
pass
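
The lookup order implemented by set_language (reuse a locally cached pack, otherwise copy the translation shipped in the zoo, otherwise generate a translation once and cache it) can be summarized in the standalone sketch below; the paths and the translate() callable are placeholders, not the lollms API.

# Standalone sketch of the resolution order; paths and translate() are placeholders.
import shutil
from pathlib import Path
import yaml

def resolve_language_pack(language: str, cache_dir: Path, zoo_languages_dir: Path, translate):
    cached = cache_dir / f"languages_{language}.yaml"
    if not cached.exists():
        cache_dir.mkdir(exist_ok=True, parents=True)
        shipped = zoo_languages_dir / f"{language}.yaml"
        if shipped.exists():
            shutil.copy(shipped, cached)                              # 1) reuse the translation shipped with the persona
        else:
            cached.write_text(translate(language), encoding="utf-8")  # 2) translate once and cache the result
    with open(cached, "r", encoding="utf-8", errors="ignore") as f:   # 3) load the cached pack
        return yaml.safe_load(f)
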
    def load_personality(self, package_path=None):
        """
@@ -814,57 +1206,32 @@ class AIPersonality:
        else:
            self._secret_cfg = None

-        languages = package_path / "languages"
-        if languages.exists():
-            self._supported_languages = []
-            for language in [l for l in languages.iterdir()]:
-                self._supported_languages.append(language.stem)
-            if self._selected_language is not None and self._selected_language in self._supported_languages:
-                config_file = languages / (self._selected_language+".yaml")
-                with open(config_file, "r", encoding='utf-8') as f:
-                    config = yaml.safe_load(f)
-
-        # Load parameters from the configuration file
-        self._version = config.get("version", self._version)
-        self._author = config.get("author", self._author)
-        self._name = config.get("name", self._name)
-        self._creation_date = config.get("creation_date", self._creation_date)
-        self._last_update_date = config.get("last_update_date", self._last_update_date)
-        self._user_name = config.get("user_name", self._user_name)
-        self._category_desc = config.get("category", self._category)
-        self._language = config.get("language", self._language)
-        self._ignore_discussion_documents_rag = config.get("ignore_discussion_documents_rag", self._ignore_discussion_documents_rag)
-        self._personality_description = config.get("personality_description", self._personality_description)
-        self._personality_conditioning = config.get("personality_conditioning", self._personality_conditioning)
-        self._prompts_list = config.get("prompts_list", self._prompts_list)
-        self._welcome_message = config.get("welcome_message", self._welcome_message)
-        self._include_welcome_message_in_discussion = config.get("include_welcome_message_in_discussion", self._include_welcome_message_in_discussion)
-        self._user_message_prefix = config.get("user_message_prefix", self._user_message_prefix)
-        self._link_text = config.get("link_text", self._link_text)
-        self._ai_message_prefix = config.get("ai_message_prefix", self._ai_message_prefix)
-        self._dependencies = config.get("dependencies", self._dependencies)
-        self._disclaimer = config.get("disclaimer", self._disclaimer)
-        self._help = config.get("help", self._help)
-        self._commands = config.get("commands", self._commands)
-        self._model_temperature = config.get("model_temperature", self._model_temperature)
-        self._model_top_k = config.get("model_top_k", self._model_top_k)
-        self._model_top_p = config.get("model_top_p", self._model_top_p)
-        self._model_repeat_penalty = config.get("model_repeat_penalty", self._model_repeat_penalty)
-        self._model_repeat_last_n = config.get("model_repeat_last_n", self._model_repeat_last_n)
-        # Script parameters (for example keys to connect to search engine or any other usage)
-        self._processor_cfg = config.get("processor_cfg", self._processor_cfg)
+        self.set_config(config, True)
+        if config["language"]:
+            default_language = config["language"].lower().strip().split()[0]
+        else:
+            default_language = 'english'
+        current_language = self.config.current_language.lower().strip().split()[0]
+
+        if current_language and current_language!= default_language:
+            language_path = self.app.lollms_paths.personal_configuration_path/"personalities"/config["name"]/f"languages_{current_language}.yaml"
+            if not language_path.exists():
+                #checking if there is already a translation in the personality folder
+                persona_language_path = self.lollms_paths.personalities_zoo_path/self.category/self.name.replace(" ","_")/"languages"/f"{current_language}.yaml"
+                if persona_language_path.exists():
+                    shutil.copy(persona_language_path, language_path)
+                    with open(language_path,"r",encoding="utf-8", errors="ignore") as f:
+                        config = yaml.safe_load(f)
+                    self.set_config(config, False)
+            else:
+                with open(language_path,"r",encoding="utf-8", errors="ignore") as f:
+                    config = yaml.safe_load(f)
+                self.set_config(config, False)

        #set package path
        self.personality_package_path = package_path
@@ -1126,6 +1493,7 @@ class AIPersonality:
            "user_name": self._user_name,
            "category": self._category,
            "language": self._language,
+            "default_language": self._default_language,
            "supported_languages": self._supported_languages,
            "selected_language": self._selected_language,
            "ignore_discussion_documents_rag": self._ignore_discussion_documents_rag,
@@ -1170,6 +1538,7 @@ class AIPersonality:
            "user_name": self._user_name,
            "category": self._category,
            "language": self._language,
+            "default_language": self._default_language,
            "supported_languages": self._supported_languages,
            "selected_language": self._selected_language,
            "ignore_discussion_documents_rag": self._ignore_discussion_documents_rag,
@@ -1280,6 +1649,14 @@ class AIPersonality:
        """Get the language."""
        return self._language

+    @property
+    def default_language(self) -> str:
+        """Get the default language."""
+        return self._default_language

    @property
    def category(self) -> str:
        """Get the category."""
@@ -1295,6 +1672,12 @@ class AIPersonality:
        """Set the language."""
        self._language = value

+    @default_language.setter
+    def default_language(self, value: str):
+        """Set the default language."""
+        self._default_language = value

    @category.setter
    def category(self, value: str):
        """Set the category."""
@@ -1754,7 +2137,7 @@ class AIPersonality:
    @property
    def ai_full_header(self) -> str:
        """Get the start_header_id_template."""
-        return f"{self.start_user_header_id_template}{self.personality.name}{self.end_user_header_id_template}"
+        return f"{self.start_user_header_id_template}{self.name}{self.end_user_header_id_template}"

    def system_custom_header(self, ai_name) -> str:
        """Get the start_header_id_template."""


@@ -513,12 +513,8 @@ class TasksLibrary:
        return code_blocks

    def translate_conditionning(self, prompt, original_language, language):
-        start_header_id_template = self.config.start_header_id_template
-        end_header_id_template = self.config.end_header_id_template
-        system_message_template = self.config.system_message_template
-        separator_template = self.config.separator_template
-        conditionning_translation_text = f"{start_header_id_template}{system_message_template}{end_header_id_template}Translate the following prompt to {language}.\nDo not translate any css or code, just the text and strings.{separator_template}{start_header_id_template}prompt{end_header_id_template}\n```{original_language}\n{prompt.replace(f'{start_header_id_template}','')}\n```{separator_template}{start_header_id_template}translation{end_header_id_template}\nHere is the translated prompt:\n```{language}\n"
-        cond_translation = f"```{language}\n"+self.fast_gen(conditionning_translation_text, temperature=0.1, callback=self.sink)
+        conditionning_translation_text = f"{self.lollms.system_full_header}Translate the following prompt to {language}.\n{self.lollms.separator_template}{self.lollms.ai_custom_header('prompt')}\n```{original_language}\n{prompt}\n```\nPut the answer inside a {language} markdown tag like this:\n```{language}\nTranslated text\n```\n{self.lollms.ai_custom_header('translation')}"
+        cond_translation = self.fast_gen(conditionning_translation_text, temperature=0.1, callback=self.sink)
        response = self.extract_code_blocks(cond_translation)
        if len(response)>0 and len(response[0]["content"])>0:
            conditionning = response[0]["content"]