Mirror of https://github.com/ParisNeo/lollms.git
Commit 75682dcbf3 (parent ac1bb16dd7)

@@ -736,6 +736,9 @@ class AIPersonality:
        if debug:
            self.print_prompt("gen",prompt)

        if max_size is None:
            max_size = min(self.config.max_n_predict, self.config.ctx_size-len(self.model.tokenize(prompt)))

        self.model.generate_with_images(
            prompt,
            images,
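
The added lines default max_size to whatever still fits: the generation budget is capped both by max_n_predict and by the context window minus the tokenized prompt. A quick worked example with made-up numbers (not from the commit):

# Illustrative values only: a 4096-token context, a 2048-token prediction cap
# and a 3000-token prompt leave room for only 1096 generated tokens, so the
# smaller bound wins.
ctx_size = 4096
max_n_predict = 2048
prompt_tokens = 3000
max_size = min(max_n_predict, ctx_size - prompt_tokens)
print(max_size)  # 1096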

@@ -2394,6 +2397,33 @@ class APScript(StateMachine):
        return self.personality.generate(prompt, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)


    def generate_codes(self, prompt, max_size = None, temperature = None, top_k = None, top_p=None, repeat_penalty=None, repeat_last_n=None, callback=None, debug=False ):
        if len(self.personality.image_files)>0:
            response = self.personality.generate_with_images(self.system_custom_header("Generation infos")+ "Generated code must be put inside the adequate markdown code tag. Use this template:\n```language name\nCode\n```\n" + self.separator_template + prompt, self.personality.image_files, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
        else:
            response = self.personality.generate(self.system_custom_header("Generation infos")+ "Generated code must be put inside the adequate markdown code tag. Use this template:\n```language name\nCode\n```\n" + self.separator_template + prompt, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
        codes = self.extract_code_blocks(response)
        return codes
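
For reference, a hedged usage sketch of the new generate_codes helper, not part of the commit; it assumes APScript is imported from lollms.personality and the prompt text is illustrative. Each returned block is a dict whose "content" key holds the code text, as generate_code below relies on.

from lollms.personality import APScript  # assumed import path

class MyScript(APScript):
    def demo(self):
        # Illustrative prompt; generate_codes returns the extracted code blocks.
        blocks = self.generate_codes("Write a Python function that reverses a string, and a unit test for it.")
        for block in blocks:
            print(block["content"])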

    def generate_code(self, prompt, max_size = None, temperature = None, top_k = None, top_p=None, repeat_penalty=None, repeat_last_n=None, callback=None, debug=False ):
        if len(self.personality.image_files)>0:
            response = self.personality.generate_with_images(self.system_custom_header("Generation infos")+ "Generated code must be put inside the adequate markdown code tag. Use this template:\n```language name\nCode\n```\nMake sure only a single code tag is generated at each dialogue turn." + self.separator_template + prompt, self.personality.image_files, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
        else:
            response = self.personality.generate(self.system_custom_header("Generation infos")+ "Generated code must be put inside the adequate markdown code tag. Use this template:\n```language name\nCode\n```\nMake sure only a single code tag is generated at each dialogue turn." + self.separator_template + prompt, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
        codes = self.extract_code_blocks(response)
        if len(codes)>0:
            # Keep a complete block as-is; for a truncated block, drop the last
            # (possibly cut-off) line before asking the model to continue.
            if codes[-1]["is_complete"]:
                code = codes[-1]["content"]
            else:
                code = "\n".join(codes[-1]["content"].split("\n")[:-1])
            while not codes[-1]["is_complete"]:
                response = self.personality.generate(prompt+code+self.user_full_header+"continue"+self.ai_full_header, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)
                codes = self.extract_code_blocks(response)
                if len(codes)==0:
                    break
                else:
                    if codes[-1]["is_complete"]:
                        code +="\n"+ codes[-1]["content"]
                    else:
                        code +="\n"+ "\n".join(codes[-1]["content"].split("\n")[:-1])
            return code
        else:
            return None
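
The continuation loop above keeps prompting the model with "continue" until the last extracted block reports is_complete, stitching the partial bodies into one string (and returning None when no block was produced). A hedged usage sketch, not part of the commit; the import path is assumed and the prompt and file name are illustrative:

from lollms.personality import APScript  # assumed import path

class ScriptBuilder(APScript):
    def build_fib_script(self):
        # Illustrative request; generate_code returns a single code string or None.
        code = self.generate_code("Write a Python script that prints the first 10 Fibonacci numbers.")
        if code is not None:
            with open("fib.py", "w") as f:
                f.write(code)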

    def run_workflow(self, prompt:str, previous_discussion_text:str="", callback: Callable[[str | list | None, MSG_OPERATION_TYPE, str, AIPersonality| None], bool]=None, context_details:dict=None, client:Client=None):
        """
        This function generates code based on the given parameters.