Saifeddine ALOUI 2024-05-20 02:24:30 +02:00
parent c5bd7dc42b
commit 6f45b1ca82
5 changed files with 88 additions and 16 deletions

View File

@@ -12,7 +12,7 @@ from PyQt5 import QtWidgets, QtGui, QtCore
import sys
from functools import partial
def build_image(prompt, width, height, processor:APScript, client:Client):
def build_image(prompt, antiprompt, width, height, processor:APScript, client:Client):
try:
if processor.personality.config.active_tti_service=="autosd":
if not processor.personality.app.tti:
@@ -23,7 +23,7 @@ def build_image(prompt, width, height, processor:APScript, client:Client):
processor.step_end("Loading ParisNeo's fork of AUTOMATIC1111's stable diffusion service")
file, infos = processor.personality.app.tti.paint(
prompt,
"",
antiprompt,
processor.personality.image_files,
width = width,
height = height,
@@ -58,5 +58,5 @@ def build_image_function(processor, client):
"function_name": "build_image",
"function": partial(build_image, processor=processor, client=client),
"function_description": "Builds and shows an image from a prompt and width and height parameters. A square 1024x1024, a portrait woudl be 1024x1820 or landscape 1820x1024.",
"function_parameters": [{"name": "prompt", "type": "str"}, {"name": "width", "type": "int"}, {"name": "height", "type": "int"}]
"function_parameters": [{"name": "prompt", "type": "str"}, {"name": "antiprompt", "type": "str"}, {"name": "width", "type": "int"}, {"name": "height", "type": "int"}]
}
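The change above threads a new antiprompt (negative prompt) argument from the function schema down to tti.paint, replacing the empty string passed before. Below is a minimal, self-contained sketch of how a schema entry like this can be dispatched once the model's function call has been parsed; fake_paint and build_image_demo are illustrative stand-ins, not lollms APIs, and the real runtime may unpack parameters differently:

from functools import partial

def fake_paint(prompt, antiprompt, width, height):
    # Stand-in for processor.personality.app.tti.paint: just echoes its inputs.
    return f"painting '{prompt}' (avoiding '{antiprompt}') at {width}x{height}"

def build_image_demo(prompt, antiprompt, width, height, painter):
    # Mirrors the shape of build_image: schema parameters first, injected context last.
    return painter(prompt, antiprompt, width, height)

function_def = {
    "function_name": "build_image",
    "function": partial(build_image_demo, painter=fake_paint),
    "function_parameters": [
        {"name": "prompt", "type": "str"},
        {"name": "antiprompt", "type": "str"},
        {"name": "width", "type": "int"},
        {"name": "height", "type": "int"},
    ],
}

# A call as the model might emit it, after JSON extraction.
call = {"prompt": "a red fox", "antiprompt": "blurry, low quality", "width": 1024, "height": 1024}
print(function_def["function"](**call))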

View File

@@ -0,0 +1,33 @@
from lollms.utilities import PackageManager, find_first_available_file_index, discussion_path_to_url
from lollms.client_session import Client
from lollms.personality import APScript
if not PackageManager.check_package_installed("pyautogui"):
    PackageManager.install_package("pyautogui")
if not PackageManager.check_package_installed("PyQt5"):
    PackageManager.install_package("PyQt5")
from ascii_colors import trace_exception
import cv2
import time
from PyQt5 import QtWidgets, QtGui, QtCore
import sys
from functools import partial
from lollms.personality import AIPersonality
from typing import List

def list_personalities(processor:APScript, client:Client):
    members:List[AIPersonality] = processor.personality.app.mounted_personalities
    collective_infos = "Members information:\n"
    for i,drone in enumerate(members):
        if drone.name!=processor.personality.name:
            collective_infos += f"member id: {i}\n"
            collective_infos += f"member name: {drone.name}\n"
            collective_infos += f"member description: {drone.personality_description[:126]}...\n"
    return collective_infos

def list_personalities_function(processor, client):
    return {
        "function_name": "list_personalities",
        "function": partial(list_personalities, processor=processor, client=client),
        "function_description": "Lists the mounted personalities in the current session.",
        "function_parameters": []
    }
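list_personalities walks app.mounted_personalities and reports every member except the calling personality. A self-contained sketch of the same formatting logic over stand-in objects (FakePersonality and list_members are illustrative only, not lollms types):

from dataclasses import dataclass
from typing import List

@dataclass
class FakePersonality:
    # Minimal stand-in exposing the two attributes the listing uses.
    name: str
    personality_description: str

def list_members(members: List[FakePersonality], own_name: str) -> str:
    collective_infos = "Members information:\n"
    for i, drone in enumerate(members):
        if drone.name != own_name:
            collective_infos += f"member id: {i}\n"
            collective_infos += f"member name: {drone.name}\n"
            collective_infos += f"member description: {drone.personality_description[:126]}...\n"
    return collective_infos

members = [FakePersonality("lollms", "Generic assistant"),
           FakePersonality("Artbot", "Generates images from text prompts")]
print(list_members(members, own_name="lollms"))
# Only Artbot is reported: the calling personality skips itself.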

View File

@@ -0,0 +1,33 @@
from lollms.utilities import PackageManager, find_first_available_file_index, discussion_path_to_url
from lollms.client_session import Client
from lollms.personality import APScript
if not PackageManager.check_package_installed("pyautogui"):
    PackageManager.install_package("pyautogui")
if not PackageManager.check_package_installed("PyQt5"):
    PackageManager.install_package("PyQt5")
from ascii_colors import trace_exception
import cv2
import time
from PyQt5 import QtWidgets, QtGui, QtCore
import sys
from functools import partial
from lollms.personality import AIPersonality
from typing import List

def summon_personality(member_id:int, prompt: str, previous_discussion_text:str, context_details, callback, processor:APScript, client:Client):
    members:List[AIPersonality] = processor.personality.app.mounted_personalities
    processor.personality.app.personality = members[member_id]
    members[member_id].callback=callback
    members[member_id].new_message("")
    processor.personality.app.handle_generate_msg(client.client_id, {"prompt":prompt})
    processor.personality.app.personality = processor.personality
    return "Execution done"

def summon_personality_function(processor, callback, previous_discussion_text, context_details, client):
    return {
        "function_name": "summon_personality",
        "function": partial(summon_personality, previous_discussion_text=previous_discussion_text, callback=callback, processor=processor, context_details=context_details, client=client),
        "function_description": "Summons a personality by id and prompts it to do something.",
        "function_parameters": [{"name": "member_id", "type": "int"},{"name": "prompt", "type": "str"}]
    }
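summon_personality temporarily swaps the app's active personality, opens a new message for it, triggers generation, then restores the original. The registration helper is the interesting part: partial pre-binds the server-side context (callback, discussion text, processor, client) so the model only has to supply member_id and prompt. A minimal sketch of that pre-binding pattern with stand-ins (summon and make_summon_function are illustrative, not lollms APIs):

from functools import partial

def summon(member_id, prompt, context, callback):
    # member_id and prompt come from the model; context and callback are injected by the host.
    callback(f"member {member_id} asked to: {prompt} (context: {context})")
    return "Execution done"

def make_summon_function(context, callback):
    return {
        "function_name": "summon_personality",
        "function": partial(summon, context=context, callback=callback),
        "function_parameters": [{"name": "member_id", "type": "int"},
                                {"name": "prompt", "type": "str"}],
    }

fn = make_summon_function(context="current discussion", callback=print)
print(fn["function"](member_id=1, prompt="draw a logo"))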

View File

@@ -3284,7 +3284,7 @@ The AI should respond in this format using data from actions_list:
def generate_with_function_calls(self, prompt: str, functions: List[Dict[str, Any]], max_answer_length: Optional[int] = None) -> List[Dict[str, Any]]:
def generate_with_function_calls(self, prompt: str, functions: List[Dict[str, Any]], max_answer_length: Optional[int] = None, callback = None) -> List[Dict[str, Any]]:
"""
Performs text generation with function calls.
@@ -3300,7 +3300,7 @@ The AI should respond in this format using data from actions_list:
upgraded_prompt = self._upgrade_prompt_with_function_info(prompt, functions)
# Generate the initial text based on the upgraded prompt.
generated_text = self.fast_gen(upgraded_prompt, max_answer_length)
generated_text = self.fast_gen(upgraded_prompt, max_answer_length, callback=callback)
# Extract the function calls from the generated text.
function_calls = self.extract_function_calls_as_json(generated_text)
@@ -3308,7 +3308,7 @@ The AI should respond in this format using data from actions_list:
return generated_text, function_calls
def generate_with_function_calls_and_images(self, prompt: str, images:list, functions: List[Dict[str, Any]], max_answer_length: Optional[int] = None) -> List[Dict[str, Any]]:
def generate_with_function_calls_and_images(self, prompt: str, images:list, functions: List[Dict[str, Any]], max_answer_length: Optional[int] = None, callback = None) -> List[Dict[str, Any]]:
"""
Performs text generation with function calls.
@@ -3324,7 +3324,7 @@ The AI should respond in this format using data from actions_list:
upgraded_prompt = self._upgrade_prompt_with_function_info(prompt, functions)
# Generate the initial text based on the upgraded prompt.
generated_text = self.fast_gen_with_images(upgraded_prompt, images, max_answer_length)
generated_text = self.fast_gen_with_images(upgraded_prompt, images, max_answer_length, callback=callback)
# Extract the function calls from the generated text.
function_calls = self.extract_function_calls_as_json(generated_text)
@@ -3435,19 +3435,24 @@ The AI should respond in this format using data from actions_list:
return function_calls
def interact_with_function_call(self, prompt, function_definitions, prompt_after_execution=True):
def interact_with_function_call(self, prompt, function_definitions, prompt_after_execution=True, callback = None):
if len(self.personality.image_files)>0:
out, function_calls = self.generate_with_function_calls_and_images(prompt, self.personality.image_files, function_definitions)
out, function_calls = self.generate_with_function_calls_and_images(prompt, self.personality.image_files, function_definitions, callback=callback)
else:
out, function_calls = self.generate_with_function_calls(prompt, function_definitions)
out, function_calls = self.generate_with_function_calls(prompt, function_definitions, callback=callback)
if len(function_calls)>0:
outputs = self.execute_function_calls(function_calls,function_definitions)
out += "\n!@>function calls results:\n" + "\n".join([str(o) for o in outputs])
if prompt_after_execution:
prompt += out +"\n"+ "!@>"+self.personality.name+":"
if len(self.personality.image_files)>0:
out, function_calls = self.generate_with_function_calls_and_images(prompt, self.personality.image_files, function_definitions, callback=callback)
else:
out, function_calls = self.generate_with_function_calls(prompt, function_definitions, callback=callback)
if len(function_calls)>0:
outputs = self.execute_function_calls(function_calls,function_definitions)
out += "\n!@>function calls results:\n" + "\n".join([str(o) for o in outputs])
prompt += out +"\n"+ "!@>"+self.personality.name+":"
if len(self.personality.image_files)>0:
out, function_calls = self.generate_with_function_calls_and_images(prompt, self.personality.image_files, function_definitions)
else:
out, function_calls = self.generate_with_function_calls(prompt, function_definitions)
return out
#Helper method to convert outputs path to url
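The personality.py changes thread an optional callback from interact_with_function_call through generate_with_function_calls(_and_images) into fast_gen, so partial output can be streamed, and they re-prompt the model after the function calls have been executed. A stripped-down sketch of that generate / execute / re-prompt loop; fake_generate and fake_execute are stand-ins, not the real lollms methods:

def fake_generate(prompt, callback=None):
    # Stand-in for generate_with_function_calls: returns generated text plus parsed calls.
    if "function calls results" in prompt:
        out, calls = "Here is your image.", []
    else:
        out, calls = "Calling build_image...", [{"function_name": "build_image",
                                                 "function_parameters": {"prompt": "a fox"}}]
    if callback:
        callback(out)  # stream the chunk to the caller, as fast_gen would
    return out, calls

def fake_execute(calls):
    return [f"executed {c['function_name']}" for c in calls]

def interact(prompt, prompt_after_execution=True, callback=None):
    out, calls = fake_generate(prompt, callback=callback)
    if len(calls) > 0:
        outputs = fake_execute(calls)
        out += "\n!@>function calls results:\n" + "\n".join(str(o) for o in outputs)
        if prompt_after_execution:
            # Feed the results back and let the model produce a final answer.
            prompt += out + "\n!@>assistant:"
            out, calls = fake_generate(prompt, callback=callback)
    return out

print(interact("!@>user: draw a fox", callback=lambda chunk: print("[stream]", chunk)))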

View File

@@ -321,6 +321,7 @@ class LollmsXTTS(LollmsTTS):
text = re.sub(r'`.*?`', '', text)
# Remove any remaining code-like patterns (this can be adjusted as needed)
text = re.sub(r'[\{\}\[\]\(\)<>]', '', text)
text = text.replace("\\","")
def tts2_audio_th(thread_uid=None):
url = f"{self.xtts_base_url}/tts_to_audio"
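The one-line addition strips backslashes from the text before it is sent to the TTS endpoint, on top of the existing backtick and bracket cleanup. A standalone sketch of that cleanup chain on a made-up sample sentence (clean_for_tts is an illustrative helper, not part of LollmsXTTS):

import re

def clean_for_tts(text: str) -> str:
    # Strip inline code spans delimited by backticks.
    text = re.sub(r'`.*?`', '', text)
    # Remove remaining code-like punctuation: braces, brackets, parentheses, angle brackets.
    text = re.sub(r'[\{\}\[\]\(\)<>]', '', text)
    # Remove backslashes (the line added in this commit).
    text = text.replace("\\", "")
    return text

print(clean_for_tts(r"Say `hello()` then read C:\\temp\\{notes}.txt"))
# -> "Say  then read C:tempnotes.txt"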