changed method names

This commit is contained in:
Saifeddine ALOUI 2024-08-16 00:26:11 +02:00
parent e8a141b8bd
commit 2501ed87b4
6 changed files with 8 additions and 8 deletions

View File

@@ -902,7 +902,7 @@ class Discussion:
pth = discussion_path_to_url(view_file)
self.lollms.new_message(client.client_id if client is not None else 0, content = "", message_type = MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_SET_CONTENT)
output = f'<img src="{pth}" width="800">\n\n'
self.lollms.full(output, client_id=client.client_id)
self.lollms.set_message_content(output, client_id=client.client_id)
self.lollms.close_message(client.client_id if client is not None else 0)
if self.lollms.model.binding_type not in [BindingType.TEXT_IMAGE, BindingType.TEXT_IMAGE_VIDEO]:
@@ -913,7 +913,7 @@
img = img.convert("RGB")
output += "## image description :\n"+ self.lollms.model.interrogate_blip([img])[0]
# output += "## image description :\n"+ self.lollms.model.qna_blip([img],"q:Describe this photo with as much details as possible.\na:")[0]
self.lollms.full(output)
self.lollms.set_message_content(output)
self.lollms.close_message(client.client_id if client is not None else 0)
self.lollms.HideBlockingMessage("Understanding image (please wait)")
if self.lollms.config.debug:

View File

@@ -118,7 +118,7 @@ def start_writing_story(
)
plan_response = llm.fast_gen(plan_prompt).strip()
llm.step_end("Building the story architecture")
llm.chunk("\n")
llm.add_chunk_to_message_content("\n")
# Extract JSON code block from the response
code_blocks = llm.extract_code_blocks(plan_response)
if not code_blocks:

View File

@@ -115,7 +115,7 @@ def take_photo(processor, client, use_ui=False, use_a_single_photo_at_a_time=Tru
client.discussion.image_files.append(fn)
if use_a_single_photo_at_a_time:
client.discussion.image_files = [client.discussion.image_files[-1]]
processor.full(f'<img src="{discussion_path_to_url(fn_view)}" width="80%"></img>')
processor.set_message_content(f'<img src="{discussion_path_to_url(fn_view)}" width="80%"></img>')
processor.new_message("")
return "Image shot successful"

View File

@@ -25,7 +25,7 @@ def read_text(text: str, tts_module:LollmsTTS, llm:APScript) -> str:
try:
# Generate audio from the text
audio_file_path = tts_module.tts_audio(text)
llm.chunk(text)
llm.add_chunk_to_message_content(text)
llm.new_message("")
# Return the path to the generated audio file

View File

@@ -3077,7 +3077,7 @@ class APScript(StateMachine):
if callback:
callback(code, MSG_OPERATION_TYPE.MSG_TYPE_CODE)
def chunk(self, full_text:str, callback: Callable[[str | list | None, MSG_OPERATION_TYPE, str, Any | None], bool]=None):
def add_chunk_to_message_content(self, full_text:str, callback: Callable[[str | list | None, MSG_OPERATION_TYPE, str, Any | None], bool]=None):
"""This sends full text to front end
Args:
@@ -4185,7 +4185,7 @@ class APScript(StateMachine):
nested_function_calls = 0
while len(function_calls)>0 and nested_function_calls<max_nested_function_calls:
nested_function_calls += 1
self.chunk("\n")
self.add_chunk_to_message_content("\n")
if hide_function_call:
self.set_message_content("") #Hide function

View File

@@ -378,7 +378,7 @@ class TasksLibrary:
if callback:
callback(code, MSG_OPERATION_TYPE.MSG_TYPE_CODE)
def chunk(self, full_text:str, callback: Callable[[str | list | None, MSG_OPERATION_TYPE, str, Any | None], bool]=None):
def add_chunk_to_message_content(self, full_text:str, callback: Callable[[str | list | None, MSG_OPERATION_TYPE, str, Any | None], bool]=None):
"""This sends full text to front end
Args: