Mirror of https://github.com/ParisNeo/lollms.git (synced 2024-12-24 14:56:44 +00:00)

Update personality.py

This commit is contained in:
parent d0b90d9599
commit 0a255b9e17
@@ -2404,20 +2404,20 @@ The AI should respond in this format using data from actions_list:
 
         return code_blocks
 
-    def yes_no(self, question: str, context:str="", max_answer_length: int = 50) -> bool:
+    def yes_no(self, question: str, context:str="", max_answer_length: int = 50, conditionning="") -> bool:
         """
         Analyzes the user prompt and answers whether it is asking to generate an image.
 
         Args:
             question (str): The user's message.
             max_answer_length (int, optional): The maximum length of the generated answer. Defaults to 50.
-
+            conditionning: An optional system message to put at the beginning of the prompt
         Returns:
             bool: True if the user prompt is asking to generate an image, False otherwise.
         """
-        return self.multichoice_question(question, ["no","yes"], context, max_answer_length)>0
+        return self.multichoice_question(question, ["no","yes"], context, max_answer_length, conditionning=conditionning)>0
 
-    def multichoice_question(self, question: str, possible_answers:list, context:str = "", max_answer_length: int = 50) -> int:
+    def multichoice_question(self, question: str, possible_answers:list, context:str = "", max_answer_length: int = 50, conditionning="") -> int:
         """
         Interprets a multi-choice question from a users response. This function expects only one choice as true. All other choices are considered false. If none are correct, returns -1.
 
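Note: for orientation, a hedged usage sketch of the new conditionning argument follows. The wrapper function, the question text, and the system message are illustrative assumptions, not part of this commit; only the yes_no signature comes from the patch above.

from typing import Any

def detect_image_request(personality: Any, user_message: str) -> bool:
    # `personality` is assumed to expose the patched yes_no method shown above.
    # The conditionning string is prepended to the prompt when it is non-empty.
    return personality.yes_no(
        "Is the user asking to generate an image?",
        context=user_message,
        conditionning="!@>system: Answer strictly with one of the given options.",  # hypothetical system text
    )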
@@ -2425,35 +2425,31 @@ The AI should respond in this format using data from actions_list:
             question (str): The multi-choice question posed by the user.
             possible_ansers (List[Any]): A list containing all valid options for the chosen value. For each item in the list, either 'True', 'False', None or another callable should be passed which will serve as the truth test function when checking against the actual user input.
             max_answer_length (int, optional): Maximum string length allowed while interpreting the users' responses. Defaults to 50.
+            conditionning: An optional system message to put at the beginning of the prompt
 
         Returns:
             int: Index of the selected option within the possible_ansers list. Or -1 if there was not match found among any of them.
         """
         choices = "\n".join([f"{i}. {possible_answer}" for i, possible_answer in enumerate(possible_answers)])
+        elements = [conditionning] if conditionning!="" else []
+        elements += [
+            "!@>instructions:",
+            "Answer this multi choices question.",
+            "Answer with an id from the possible answers.",
+            "Do not answer with an id outside this possible answers.",
+            f"!@>question: {question}",
+            "!@>possible answers:",
+            f"{choices}",
+        ]
         if context!="":
-            prompt = self.build_prompt([
-                "!@>instructions:",
-                "Answer this multi choices question.",
-                "Answer with an id from the possible answers.",
-                "Do not answer with an id outside this possible answers.",
-                f"!@>question: {question}",
-                "!@>possible answers:",
-                f"{choices}",
-                "!@>Context:",
-                f"{context}",
-                "!@>answer:"
-            ])
-        else:
-            prompt = self.build_prompt([
-                "!@>instructions:",
-                "Answer this multi choices question.",
-                "Answer with an id from the possible answers.",
-                "Do not answer with an id outside this possible answers.",
-                f"!@>question: {question}",
-                "!@>possible answers:",
-                f"{choices}",
-                "!@>answer:"
-            ])
+            elements+=[
+                "!@>Context:",
+                f"{context}",
+            ]
+
+        elements += ["!@>answer:"]
+        prompt = self.build_prompt(elements)
+
         gen = self.generate(prompt, max_answer_length, temperature=0.1, top_k=50, top_p=0.9, repeat_penalty=1.0, repeat_last_n=50).strip().replace("</s>","").replace("<s>","")
         selection = gen.strip().split()[0].replace(",","").replace(".","")
         self.print_prompt("Multi choice selection",prompt+gen)
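Note: the refactor above replaces the duplicated if/else build_prompt calls with a single elements list. Below is a self-contained sketch of what that list assembles, assuming build_prompt simply joins its parts with newlines (an assumption; the real self.build_prompt in lollms may do more).

def build_prompt(elements):
    # Assumed stand-in for self.build_prompt: join prompt parts with newlines.
    return "\n".join(elements)

def make_multichoice_prompt(question, possible_answers, context="", conditionning=""):
    # Mirrors the elements-based construction introduced above.
    choices = "\n".join(f"{i}. {a}" for i, a in enumerate(possible_answers))
    elements = [conditionning] if conditionning != "" else []
    elements += [
        "!@>instructions:",
        "Answer this multi choices question.",
        "Answer with an id from the possible answers.",
        "Do not answer with an id outside this possible answers.",
        f"!@>question: {question}",
        "!@>possible answers:",
        f"{choices}",
    ]
    if context != "":
        elements += ["!@>Context:", f"{context}"]
    elements += ["!@>answer:"]
    return build_prompt(elements)

print(make_multichoice_prompt("Is this request about images?", ["no", "yes"],
                              conditionning="!@>system: You are a strict classifier."))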
@@ -2463,7 +2459,7 @@ The AI should respond in this format using data from actions_list:
             ASCIIColors.cyan("Model failed to answer the question")
             return -1
 
-    def multichoice_ranking(self, question: str, possible_answers:list, context:str = "", max_answer_length: int = 50) -> int:
+    def multichoice_ranking(self, question: str, possible_answers:list, context:str = "", max_answer_length: int = 50, conditionning="") -> int:
         """
         Ranks answers for a question from best to worst. returns a list of integers
 
@@ -2471,33 +2467,33 @@ The AI should respond in this format using data from actions_list:
             question (str): The multi-choice question posed by the user.
             possible_ansers (List[Any]): A list containing all valid options for the chosen value. For each item in the list, either 'True', 'False', None or another callable should be passed which will serve as the truth test function when checking against the actual user input.
             max_answer_length (int, optional): Maximum string length allowed while interpreting the users' responses. Defaults to 50.
+            conditionning: An optional system message to put at the beginning of the prompt
 
         Returns:
             int: Index of the selected option within the possible_ansers list. Or -1 if there was not match found among any of them.
         """
+        choices = "\n".join([f"{i}. {possible_answer}" for i, possible_answer in enumerate(possible_answers)])
+        elements = [conditionning] if conditionning!="" else []
+        elements += [
+            "!@>instructions:",
+            "Answer this multi choices question.",
+            "Answer with an id from the possible answers.",
+            "Do not answer with an id outside this possible answers.",
+            f"!@>question: {question}",
+            "!@>possible answers:",
+            f"{choices}",
+        ]
         if context!="":
-            prompt = self.build_prompt([
-                "!@>instruction:",
-                "Act as prompt ranker, a tool capable of ranking the user prompt. The ranks are returned as a python list. Do not add comments.",
-                "!@>Context:",
-                f"{context}",
-                "!@>question: {{question}}",
-                "!@>choices:",
-                "{{choices}}",
-                "!@>prompt analyzer: After analyzing the user prompt, here is my ranking of the choices from best to worst : ranks=["
-            ])
-        else:
-            prompt = self.build_prompt([
-                "!@>instruction:",
-                "Act as prompt ranker, a tool capable of ranking the user prompt. The ranks are returned as a python list. Do not add comments.",
-                "!@>question: {{question}}",
-                "!@>choices:",
-                "{{choices}}",
-                "!@>prompt analyzer: After analyzing the user prompt, here is my ranking of the choices from best to worst : ranks=["
-            ])
+            elements+=[
+                "!@>Context:",
+                f"{context}",
+            ]
 
-        gen = "["+self.generate(prompt, max_answer_length).strip().replace("</s>","").replace("<s>","")
-        self.print_prompt("Multi choice selection",prompt+gen)
+        elements += ["!@>answer:"]
+        prompt = self.build_prompt(elements)
+
+        gen = self.generate(prompt, max_answer_length, temperature=0.1, top_k=50, top_p=0.9, repeat_penalty=1.0, repeat_last_n=50).strip().replace("</s>","").replace("<s>","")
+        self.print_prompt("Multi choice ranking",prompt+gen)
         if gen.index("]")>=0:
             try:
                 ranks = eval(gen.split("]")[0]+"]")
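Note: after this change multichoice_ranking no longer seeds the answer with "ranks=[", so the parsing step expects the model to emit a complete bracketed list. Below is a standalone sketch of that parsing, using ast.literal_eval here as a safer stand-in for the eval() call in the actual code (a deliberate substitution, not what the commit uses).

import ast

def parse_ranking(gen: str):
    # Keep everything up to the first "]" and evaluate it as a Python list,
    # mirroring ranks = eval(gen.split("]")[0]+"]") in the patched method.
    if "]" in gen:
        try:
            return ast.literal_eval(gen.split("]")[0] + "]")
        except (ValueError, SyntaxError):
            return None
    return None

print(parse_ranking("[2, 0, 1] because option 2 fits best"))  # -> [2, 0, 1]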
@@ -2510,6 +2506,7 @@ The AI should respond in this format using data from actions_list:
                 return None
 
 
+
     def info(self, info_text:str, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
         """This sends info text to front end
 