Upgraded to v1.3

Saifeddine ALOUI 2024-02-26 08:56:34 +01:00
parent 01aeada6a3
commit ac5d2b6df0
3 changed files with 6 additions and 28 deletions

View File

@@ -251,30 +251,6 @@ class LollmsApplication(LoLLMsCom):
         return string
-    def safe_generate(self, full_discussion:str, n_predict=None, callback: Callable[[str, int, dict], bool]=None, placeholder={}, place_holders_to_sacrifice=[], debug=False):
-        """safe_generate
-        Args:
-            full_discussion (string): A prompt or a long discussion to use for generation
-            callback (_type_, optional): A callback to call for each received token. Defaults to None.
-        Returns:
-            str: Model output
-        """
-        full_discussion = PromptReshaper(full_discussion).build(placeholder, self.model.tokenize, self.model.detokenize, max_nb_tokens=self.config.ctx_size-n_predict, place_holders_to_sacrifice=place_holders_to_sacrifice )
-        if debug:
-            ASCIIColors.yellow(full_discussion)
-        if n_predict == None:
-            n_predict =self.personality.model_n_predicts
-        self.bot_says = ""
-        if self.personality.processor is not None and self.personality.processor_cfg["custom_workflow"]:
-            ASCIIColors.info("processing...")
-            generated_text = self.personality.processor.run_workflow(full_discussion.split("!@>")[-1] if "!@>" in full_discussion else full_discussion, previous_discussion_text=self.personality.personality_conditioning+fd, callback=callback)
-        else:
-            ASCIIColors.info("generating...")
-            generated_text = self.personality.model.generate(full_discussion, n_predict=n_predict, callback=callback)
-        return generated_text
     def load_binding(self):
         try:
             binding = BindingBuilder().build_binding(self.config, self.lollms_paths, lollmsCom=self)
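
The removed safe_generate helper bundled prompt reshaping, an optional custom workflow, and direct model generation. For context, a minimal sketch of the direct-generation path the helper wrapped, assuming the model.generate(prompt, n_predict=..., callback=...) signature visible in the removed code; generate_directly, app, and on_token are illustrative names, not part of this commit:

# Minimal sketch, not from this commit: stream tokens from the model directly,
# mirroring the generate(...) call seen in the removed helper above.
def generate_directly(app, prompt: str, n_predict: int = 512) -> str:
    def on_token(token: str, message_type: int, metadata: dict) -> bool:
        print(token, end="", flush=True)  # echo each streamed token
        return True                       # returning True keeps generation going
    return app.personality.model.generate(prompt, n_predict=n_predict, callback=on_token)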
@@ -558,7 +534,9 @@ class LollmsApplication(LoLLMsCom):
             if self.config.data_vectorization_build_keys_words:
                 discussion = self.recover_discussion(client_id)
-                query = self.personality.fast_gen(f"\n!@>instruction: Read the discussion and rewrite the last prompt for someone who didn't read the entire discussion.\nDo not answer the prompt. Do not add explanations.\n!@>discussion:\n{discussion[-2048:]}\n!@>enhanced query: ", max_generation_size=256, show_progress=True)
+                self.personality.step_start("Building vector store query")
+                query = self.personality.fast_gen(f"\n!@>instruction: Read the discussion and rewrite the last prompt for someone who didn't read the entire discussion.\nDo not answer the prompt. Do not add explanations.\n!@>discussion:\n{discussion[-2048:]}\n!@>enhanced query: ", max_generation_size=256, show_progress=True, callback=self.personality.sink)
+                self.personality.step_end("Building vector store query")
                 ASCIIColors.cyan(f"Query: {query}")
             else:
                 query = current_message.content
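
The rewritten call silences token streaming by passing callback=self.personality.sink while the step_start/step_end pair shows a progress indicator instead. A hedged sketch of what such a sink callback plausibly looks like; the chunk/message-type signature is an assumption based on the callbacks used elsewhere in this diff, not code from this commit:

# Assumed shape of a sink callback: accept every streamed chunk, emit nothing,
# and return True so generation continues silently.
def sink(chunk: str, message_type: int = 0, metadata: dict = None) -> bool:
    return True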

View File

@@ -1950,10 +1950,10 @@ class APScript(StateMachine):
         ASCIIColors.blue("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*")

-    def add_file(self, path, callback=None):
+    def add_file(self, path, client:Client, callback=None):
         if callback is not None:
             callback("File added successfully",MSG_TYPE.MSG_TYPE_INFO)
-        self.personality.add_file(path,callback=callback)
+        self.personality.add_file(path, client=client,callback=callback)
         return True

     def remove_file(self, path):
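
With the new signature, personality scripts must forward the owning Client when registering a file. A hedged usage sketch; script, client, and the file path are placeholders, not names from this commit:

def notify(message: str, message_type) -> None:
    print(message)  # surfaces the "File added successfully" info message

# Hypothetical call site after this change: the Client is now threaded through
# to Personality.add_file so uploads are tracked per client.
script.add_file("uploads/report.pdf", client, callback=notify)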

View File

@@ -97,7 +97,7 @@ def add_events(sio:socketio):
        if is_last_chunk:
            lollmsElfServer.success('File received and saved successfully')
            if lollmsElfServer.personality.processor:
-                result = lollmsElfServer.personality.processor.add_file(file_path, partial(lollmsElfServer.process_chunk, client_id=client_id))
+                result = lollmsElfServer.personality.processor.add_file(file_path, client, partial(lollmsElfServer.process_chunk, client_id=client_id))
            else:
                result = lollmsElfServer.personality.add_file(file_path, client, partial(lollmsElfServer.process_chunk, client_id=client_id))
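
Both branches now receive the client, and the per-client streaming callback is pre-bound with functools.partial, as in the diff above. A small sketch of that binding pattern; the process_chunk signature here is an assumption, not taken from this commit:

from functools import partial

def process_chunk(chunk: str, message_type: int, client_id: str = None) -> bool:
    # Assumed shape: forward the streamed chunk to the originating client's socket.
    return True

# Pre-bind client_id so downstream code can call callback(chunk, message_type)
# without knowing which client started the upload.
callback = partial(process_chunk, client_id="client-1")
callback("hello", 0)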