Moved function calls to new format

This commit is contained in:
Saifeddine ALOUI 2025-03-07 20:51:15 +01:00
parent 91e81e1415
commit d0e86906e9
4 changed files with 99 additions and 107 deletions

View File

@ -1376,36 +1376,6 @@ Answer directly with the reformulation of the last prompt.
with open(dr/"config.yaml", "r") as f:
fc_dict = yaml.safe_load(f.read())
# let us check static settings from fc_dict
if 'static_parameters' in fc_dict:
# Extract category and name from fc_dict
category = fc_dict.get('category')
name = fc_dict.get('name')
# Build the configuration file path
config_path = self.lollms_paths.personal_configuration_path / "function_calls" / category / name / "config.yaml"
# Check if the configuration file exists
if config_path.exists():
# Load existing configuration
with open(config_path, 'r') as f:
static_parameters = yaml.safe_load(f)
else:
# Create default configuration
static_parameters = {}
# Extract default values from fc_dict's static_parameters
for param in fc_dict['static_parameters']:
static_parameters[param['name']] = param.get('default',"")
# Create parent directories if they don't exist
config_path.parent.mkdir(parents=True, exist_ok=True)
# Save the default configuration
with open(config_path, 'w') as f:
yaml.dump(static_parameters, f)
else:
# the function doesn't need static parameters
static_parameters = {}
# Step 1: Construct the full path to the function.py module
module_path = dr / "function.py"
module_name = "function" # Name for the loaded module
@ -1424,7 +1394,7 @@ Answer directly with the reformulation of the last prompt.
class_ = getattr(module, class_name)
# Step 4: Create an instance of the class and store it in fc_dict["class"]
fc_dict["class"] = class_(self, client, static_parameters)
fc_dict["class"] = class_(self, client)
function_calls.append(fc_dict)
except Exception as ex:
self.error("Couldn't add function call to context")

View File

@ -2,6 +2,8 @@ from functools import partial
from typing import Dict, Any, List
from enum import Enum, auto
from lollms.client_session import Client
from lollms.com import LoLLMsCom
from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
# Step 1: Define the FunctionType enum
class FunctionType(Enum):
@ -11,10 +13,28 @@ class FunctionType(Enum):
# Step 2: Update the FunctionCall base class
class FunctionCall:
def __init__(self, function_type: FunctionType, client: Client, static_parameters=dict):
def __init__(self, name:str, app:LoLLMsCom, function_type: FunctionType, client: Client, static_parameters:TypedConfig=None):
self.name = name
self.app = app
self.personality = app.personality
self.function_type = function_type
self.client = client
self.static_parameters = static_parameters
if static_parameters is not None:
self.static_parameters = static_parameters
self.sync_configuration()
def sync_configuration(self):
    """
    Bind this function call's static parameters to its on-disk config file.

    Builds the per-function configuration path under the personal
    configuration directory, creates missing parent folders, then loads the
    existing configuration — or saves the defaults when loading fails (e.g.
    first run, file absent) — and finally syncs the in-memory TypedConfig
    with the file content.
    """
    # NOTE(review): this path uses "services" while the personality loader
    # builds per-function paths under "function_calls" — confirm which
    # location is intended so settings are read from where they are written.
    self.configuration_file_path = self.app.lollms_paths.personal_configuration_path/"services"/self.name/f"config.yaml"
    self.configuration_file_path.parent.mkdir(parents=True, exist_ok=True)
    self.static_parameters.file_path = self.configuration_file_path
    try:
        self.static_parameters.config.load_config()
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Narrowed to Exception; the fallback intent
        # (persist defaults when no config exists yet) is preserved.
        self.static_parameters.config.save_config()
    self.static_parameters.sync()
def settings_updated(self):
    # Hook called after the static parameters change (see
    # set_function_call_settings endpoint); subclasses may override to react.
    pass
def execute(self, context, *args, **kwargs):
"""
@ -32,75 +52,4 @@ class FunctionCall:
def process_output(self, context, llm_output:str):
if self.function_type == FunctionType.CONTEXT_UPDATE:
raise NotImplementedError("Subclasses must implement the process_output for CONTEXT_UPDATE functions.")
# from lollms.tasks import TasksLibrary
# class FunctionCalling_Library:
# def __init__(self, tasks_library:TasksLibrary):
# self.tl = tasks_library
# self.function_definitions = []
# def register_function(self, function_name, function_callable, function_description, function_parameters):
# self.function_definitions.append({
# "function_name": function_name,
# "function": function_callable,
# "function_description": function_description,
# "function_parameters": function_parameters
# })
# def unregister_function(self, function_name):
# self.function_definitions = [func for func in self.function_definitions if func["function_name"] != function_name]
# def execute_function_calls(self, function_calls: List[Dict[str, Any]]) -> List[Any]:
# """
# Executes the function calls with the parameters extracted from the generated text,
# using the original functions list to find the right function to execute.
# Args:
# function_calls (List[Dict[str, Any]]): A list of dictionaries representing the function calls.
# function_definitions (List[Dict[str, Any]]): The original list of functions with their descriptions and callable objects.
# Returns:
# List[Any]: A list of results from executing the function calls.
# """
# results = []
# # Convert function_definitions to a dict for easier lookup
# functions_dict = {func['function_name']: func['function'] for func in self.function_definitions}
# for call in function_calls:
# function_name = call.get("function_name")
# parameters = call.get("function_parameters", [])
# function = functions_dict.get(function_name)
# if function:
# try:
# # Assuming parameters is a dictionary that maps directly to the function's arguments.
# if type(parameters)==list:
# result = function(*parameters)
# elif type(parameters)==dict:
# result = function(**parameters)
# results.append(result)
# except TypeError as e:
# # Handle cases where the function call fails due to incorrect parameters, etc.
# results.append(f"Error calling {function_name}: {e}")
# else:
# results.append(f"Function {function_name} not found.")
# return results
# def generate_with_functions(self, prompt):
# # Assuming generate_with_function_calls is a method from TasksLibrary
# ai_response, function_calls = self.tl.generate_with_function_calls(prompt, self.function_definitions)
# return ai_response, function_calls
# def generate_with_functions_with_images(self, prompt, image_files):
# # Assuming generate_with_function_calls_and_images is a method from TasksLibrary
# if len(image_files) > 0:
# ai_response, function_calls = self.tl.generate_with_function_calls_and_images(prompt, image_files, self.function_definitions)
# else:
# ai_response, function_calls = self.tl.generate_with_function_calls(prompt, self.function_definitions)
# return ai_response, function_calls
raise NotImplementedError("Subclasses must implement the process_output for CONTEXT_UPDATE functions.")

View File

@ -212,4 +212,57 @@ async def toggle_function_call(request: Request):
entry["selected"] = not entry["selected"]
lollmsElfServer.config.save_config()
return {"status": True, "message": "Function mounted successfully"}
return {"status": True, "message": "Function mounted successfully"}
@router.post("/get_function_call_settings")
async def get_function_call_settings(request: Request):
    """
    Return the static-parameters settings template of a mounted function call.

    Expects a JSON body with ``client_id``, ``dir`` and ``name``.
    Returns the entry's static-parameters template, or ``{}`` when the
    function is not mounted or exposes no static parameters.
    """
    data = await request.json()
    check_access(lollmsElfServer, data["client_id"])
    fn_dir = data.get("dir")
    function_name = data.get("name")
    for entry in lollmsElfServer.config.mounted_function_calls:
        if entry["name"] == function_name and entry["dir"] == str(fn_dir):
            # BUGFIX: entries are plain dicts (toggle_function_call accesses
            # entry["selected"]), so hasattr(entry, "static_params") was
            # always False and settings were never returned. Test key
            # membership and index the dict instead.
            if "static_params" in entry:
                return entry["static_params"].config_template.template
            return {}
    return {}
@router.post("/set_function_call_settings")
async def set_function_call_settings(request: Request):
    """
    Set the static parameters of a mounted function call.

    Expects a JSON body with ``client_id``, ``dir``, ``name`` and
    ``settings``. Returns ``{'status': True}`` on success,
    ``{'status': False}`` when the function is not mounted or has no static
    parameters, and ``{'status': False, 'error': ...}`` on exception.
    """
    # BUGFIX: the docstring was placed after the first statements (making it
    # a no-op string literal) and described "ttv settings" — moved to the top
    # and corrected.
    data = await request.json()
    check_access(lollmsElfServer, data["client_id"])
    settings = data["settings"]
    try:
        print("- Setting function call settings")
        fn_dir = data.get("dir")
        function_name = data.get("name")
        for entry in lollmsElfServer.config.mounted_function_calls:
            if entry["name"] == function_name and entry["dir"] == str(fn_dir):
                # BUGFIX: entries are plain dicts (toggle_function_call uses
                # entry["selected"]), so hasattr(entry, "static_params") was
                # always False and settings were never saved. Test key
                # membership and index the dict instead.
                if "static_params" in entry:
                    entry["static_params"].update_template(settings)
                    entry["static_params"].config.save_config()
                    # NOTE(review): settings_updated is defined on
                    # FunctionCall, not on the stored static_params object —
                    # confirm this dispatches to the right object.
                    entry["static_params"].settings_updated()
                    return {'status': True}
                return {'status': False}
        return {'status': False}
    except Exception as ex:
        trace_exception(ex)
        lollmsElfServer.error(ex)
        return {"status": False, "error": str(ex)}

View File

@ -31,7 +31,7 @@ class LollmsNovitaAITextToVideo(LollmsTTV):
service_config = TypedConfig(
ConfigTemplate([
{"name":"api_key", "type":"str", "value":api_key, "help":"A valid Novita AI key to generate text using anthropic api"},
{"name":"generation_engine","type":"str","value":"stable_diffusion", "options": ["stable_diffusion", "hunyuan-video-fast"], "help":"The engine name"},
{"name":"generation_engine","type":"str","value":"stable_diffusion", "options": ["stable_diffusion", "hunyuan-video-fast", "wan-t2v"], "help":"The engine name"},
{"name":"sd_model_name","type":"str","value":"darkSushiMixMix_225D_64380.safetensors", "options": ["darkSushiMixMix_225D_64380.safetensors"], "help":"The model name"}
]),
BaseConfig(config={
@ -123,6 +123,26 @@ class LollmsNovitaAITextToVideo(LollmsTTV):
}
response = requests.request("POST", url, json=payload, headers=headers)
elif self.service_config.generation_engine=="wan-t2v":
width, height, nb_frames, steps = self.pin_dimensions_frames_steps(width, height, nb_frames, steps)
url = "https://api.novita.ai/v3/async/wan-t2v"
payload = {
"model_name": "wan-t2v",
"width": width,
"height": height,
"seed": seed,
"steps": steps,
"prompt": prompt,
"frames": nb_frames
}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.service_config.api_key}"
}
response = requests.request("POST", url, json=payload, headers=headers)
elif self.service_config.generation_engine=="stable_diffusion":
print(response.text)
if model_name=="":