Saifeddine ALOUI 2024-07-28 20:07:28 +02:00
parent 9e4159c110
commit 0d9b7ad5e0
2 changed files with 21 additions and 5 deletions


@@ -837,7 +837,7 @@ class LollmsApplication(LoLLMsCom):
            discussion += "\n" + self.config.discussion_prompt_separator + msg.sender + ": " + msg.content.strip()
        return discussion
    # -------------------------------------- Prompt preparing
    def prepare_query(self, client_id: str, message_id: int = -1, is_continue: bool = False, n_tokens: int = 0, generation_type = None, force_using_internet=False) -> Tuple[str, str, List[str]]:
    def prepare_query(self, client_id: str, message_id: int = -1, is_continue: bool = False, n_tokens: int = 0, generation_type = None, force_using_internet=False, previous_chunk="") -> Tuple[str, str, List[str]]:
        """
        Prepares the query for the model.
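The new previous_chunk argument lets a caller that is resuming a truncated answer hand the text generated so far back to the prompt builder. A minimal caller sketch, assuming an application instance app, a client_id, and a last_partial_answer string (all illustrative names, not part of the commit):

# Hedged sketch; app, client_id and last_partial_answer are illustrative names.
result = app.prepare_query(
    client_id,
    message_id=-1,
    is_continue=True,                    # resuming an unfinished answer
    n_tokens=0,
    previous_chunk=last_partial_answer,  # text produced by the previous generation pass
)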
@@ -1374,7 +1374,8 @@ class LollmsApplication(LoLLMsCom):
"extra":"",
"available_space":available_space,
"skills":skills,
"is_continue":is_continue
"is_continue":is_continue,
"previous_chunk":previous_chunk
}
if self.config.debug:
ASCIIColors.highlight(documentation,"source_document_title", ASCIIColors.color_yellow, ASCIIColors.color_red, False)
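The same value is then exposed to personality scripts through the context_details payload. A hedged sketch of how a downstream consumer might read the new key without breaking on older payloads that do not carry it yet (new_chunk is an illustrative name for freshly generated text):

# Hedged consumer sketch; new_chunk is an illustrative name.
previous_chunk = context_details.get("previous_chunk", "")
if context_details.get("is_continue", False) and previous_chunk:
    # Stitch the old and new parts so the final message reads as one answer.
    final_text = previous_chunk + new_chunk
else:
    final_text = new_chunk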


@@ -777,9 +777,11 @@ class AIPersonality:
        self.bot_says = ""
        if debug:
            self.print_prompt("gen",prompt)
        ntokens = len(self.model.tokenize(prompt))
        self.model.generate(
            prompt,
            max_size if max_size else min(self.config.ctx_size-len(self.model.tokenize(prompt)), self.config.max_n_predict),
            max_size if max_size else min(self.config.ctx_size-ntokens, self.config.max_n_predict),
            partial(self.process, callback=callback, show_progress=show_progress),
            temperature=self.model_temperature if temperature is None else temperature,
            top_k=self.model_top_k if top_k is None else top_k,
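This refactor tokenizes the prompt once, caches the count in ntokens, and keeps the generation budget as the remaining context window capped by max_n_predict. A small worked example with assumed numbers:

# Worked example of the budget computation above; all values are illustrative.
ctx_size = 4096        # total context window
max_n_predict = 1024   # configured cap on generated tokens
ntokens = 3500         # tokens already consumed by the prompt
budget = min(ctx_size - ntokens, max_n_predict)   # -> 596: the prompt leaves less room than the cap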
@@ -3831,9 +3833,22 @@ class APScript(StateMachine):
        return rounds_info

    def answer(self, context_details, callback=None, send_full=True):
        if context_details["is_continue"]:
            full_prompt = self.build_prompt_from_context_details(context_details, suppress= ["ai_prefix"])
        else:
            full_prompt = self.build_prompt_from_context_details(context_details)
        out = self.fast_gen(full_prompt)
        nb_tokens = len(self.personality.model.tokenize(out))
        if nb_tokens >= self.config.max_n_predict-1:
            out = out+self.fast_gen(full_prompt+out, callback=callback)
        if context_details["is_continue"]:
            out = context_details["previous_chunk"] + out
        if send_full:
            self.full(out)
        return out

    def generate_with_function_calls(self, context_details: dict, functions: List[Dict[str, Any]], max_answer_length: Optional[int] = None, callback = None) -> List[Dict[str, Any]]:
        """
        Performs text generation with function calls.
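With the changes to answer() above, a continuation request regenerates from the full prompt, chains a second fast_gen pass when the first one hits the max_n_predict limit, and prepends the previous chunk before the result is sent. A hedged usage sketch (script and the literal strings are illustrative, not from the commit):

# Hedged usage sketch; script and the strings below are illustrative.
context_details["is_continue"] = True
context_details["previous_chunk"] = "First half of a long answer that got cut off..."
out = script.answer(context_details)
# out now starts with the previous chunk followed by the newly generated continuation;
# a second fast_gen pass is triggered automatically if the first one exhausts max_n_predict.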