mirror of https://github.com/ParisNeo/lollms.git
added print prompt to fastgen
This commit is contained in:
parent 20ac66c5a6
commit e4ed050527
@@ -169,6 +169,13 @@ Date: {{date}}
         # Open and store the personality
         self.load_personality()

+    def print_prompt(self, title, prompt):
+        ASCIIColors.red("*-*-*-*-*-*-*-* ", end="")
+        ASCIIColors.red(title, end="")
+        ASCIIColors.red(" *-*-*-*-*-*-*-*")
+        ASCIIColors.yellow(prompt)
+        ASCIIColors.red(" *-*-*-*-*-*-*-*")
+

     def fast_gen(self, prompt: str, max_generation_size: int=None, placeholders: dict = {}, sacrifice: list = ["previous_discussion"], debug: bool = False, callback=None) -> str:
         """
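The new print_prompt helper frames the title between red banner segments (end="" keeps the three red calls on one terminal line) and prints the prompt body in yellow so it stands out. Below is a minimal standalone sketch of the same behavior, assuming the ascii_colors package that lollms imports ASCIIColors from; the module-level function and the sample strings are illustrative, not part of the commit:

```python
# Minimal sketch of the banner-style debug printing added above.
# ASCIIColors is the helper class the diff relies on; the standalone
# function name and sample strings are illustrative assumptions only.
from ascii_colors import ASCIIColors

def print_prompt(title: str, prompt: str) -> None:
    # Title framed by a red banner; end="" keeps the three red
    # segments on a single line.
    ASCIIColors.red("*-*-*-*-*-*-*-* ", end="")
    ASCIIColors.red(title, end="")
    ASCIIColors.red(" *-*-*-*-*-*-*-*")
    # Prompt body in yellow, visually separated from the red frame.
    ASCIIColors.yellow(prompt)
    ASCIIColors.red(" *-*-*-*-*-*-*-*")

if __name__ == "__main__":
    print_prompt("fast_gen prompt", "Hello, world")
```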
@@ -187,6 +194,9 @@ Date: {{date}}
         Returns:
         - str: The generated text after removing special tokens ("<s>" and "</s>") and stripping any leading/trailing whitespace.
         """
+        if debug == False:
+            debug = self.config.debug
+
         if max_generation_size is None:
             prompt_size = self.model.tokenize(prompt)
             max_generation_size = self.model.config.ctx_size - len(prompt_size)
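In the second hunk, fast_gen falls back to the global self.config.debug flag when the caller does not request debugging explicitly, and when no max_generation_size is given it budgets generation as whatever context remains after the prompt. Note that prompt_size actually holds the token list returned by tokenize, so len(prompt_size) is the token count. A toy illustration of that budget rule follows; the whitespace tokenizer and the 4096 context size are assumptions for illustration, not the lollms implementation:

```python
# Toy illustration of the context-budget rule in the hunk above:
#   max_generation_size = ctx_size - number_of_prompt_tokens
# The whitespace tokenizer and the ctx_size value are stand-ins only;
# a real model tokenizer returns token ids, not words.

def tokenize(text: str) -> list:
    # Stand-in for self.model.tokenize(prompt)
    return text.split()

ctx_size = 4096  # stand-in for self.model.config.ctx_size

prompt = "Translate this sentence to French"
prompt_tokens = tokenize(prompt)           # the diff names this prompt_size
max_generation_size = ctx_size - len(prompt_tokens)
print(max_generation_size)                 # -> 4091 with this toy tokenizer
```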