Moving to v16

Saifeddine ALOUI 2024-12-01 00:45:58 +01:00
parent bac226f35d
commit 4f8f42e038
4 changed files with 59 additions and 31 deletions

View File

@@ -1433,11 +1433,7 @@ class LollmsApplication(LoLLMsCom):
ai_prefix = self.personality.ai_message_prefix
else:
ai_prefix = ""
# Build the final prompt by concatenating the conditioning and discussion messages
prompt_data = conditionning + internet_search_results + documentation + knowledge + user_description + discussion_messages + positive_boost + negative_boost + fun_mode + (self.separator_template + start_ai_header_id_template + ai_prefix + end_ai_header_id_template if not is_continue else '' if not self.config.use_continue_message else end_ai_header_id_template + "CONTINUE FROM HERE And do not open a new markdown code tag" + self.separator_template + start_ai_header_id_template + ai_prefix + end_ai_header_id_template)
# Tokenize the prompt data
tokens = self.model.tokenize(prompt_data)
# Details
context_details = {
@@ -1460,12 +1456,13 @@ class LollmsApplication(LoLLMsCom):
"available_space":available_space,
"skills":skills,
"is_continue":is_continue,
"previous_chunk":previous_chunk
"previous_chunk":previous_chunk,
"prompt":current_message.content
}
if self.config.debug and not self.personality.processor:
ASCIIColors.highlight(documentation,"source_document_title", ASCIIColors.color_yellow, ASCIIColors.color_red, False)
# Return the prepared query, original message content, and tokenized query
return prompt_data, current_message.content, tokens, context_details, internet_search_infos
return context_details
# Properties ===============================================
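Taken together, this hunk removes the in-place prompt assembly and tokenization and makes the method return only context_details. A hedged migration sketch, not part of the commit; the caller-side method and variable names below are assumptions based on the surrounding code:
# Before: five values, with the prompt already built and tokenized by the application.
# prompt_data, content, tokens, context_details, internet_search_infos = app.prepare_query(...)
# After: a single dict; prompt building and tokenization move to AIPersonality (next file).
context_details = app.prepare_query(client_id, message_id, is_continue=is_continue)
prompt_data, tokens = app.personality.build_context(context_details, is_continue=is_continue, return_tokens=True)
n_predict = app.personality.compute_n_predict(tokens)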

View File

@@ -249,6 +249,30 @@ class AIPersonality:
# Open and store the personality
self.load_personality()
def compute_n_predict(self, tokens):
return min(self.config.ctx_size-len(tokens)-1,self.config.max_n_predict if self.config.max_n_predict else self.config.ctx_size-len(tokens)-1)
def build_context(self, context_details, is_continue=False, return_tokens=False):
# Build the final prompt by joining the conditioning and discussion messages
prompt_data = self.separator_template.join(
[
context_details["conditionning"],
context_details["internet_search_results"],
context_details["documentation"],
context_details["knowledge"],
context_details["user_description"],
context_details["discussion_messages"],
context_details["positive_boost"],
context_details["negative_boost"],
context_details["fun_mode"],
self.ai_full_header if not is_continue else '' if not self.config.use_continue_message else "CONTINUE FROM HERE And do not open a new markdown code tag." + self.separator_template + self.ai_full_header
]
)
tokens = self.model.tokenize(prompt_data)
if return_tokens:
return prompt_data, tokens
else:
return prompt_data
def InfoMessage(self, content, duration:int=4, client_id=None, verbose:bool=True):
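# Illustrative note, not part of the commit: compute_n_predict caps generation at the
# context space left after the prompt. With ctx_size=4096 and a 1000-token prompt the
# free space is 4096-1000-1 = 3095; with max_n_predict=2048 the method returns
# min(3095, 2048) = 2048, and with max_n_predict unset it returns 3095.
# A hedged usage sketch, assuming a context_details dict shaped like the one
# prepared by LollmsApplication:
# prompt_data, tokens = personality.build_context(context_details, return_tokens=True)
# n_predict = personality.compute_n_predict(tokens)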
@@ -3140,13 +3164,11 @@ Use this structure:
"formatted_string": formatted_string
}
def run_workflow(self, prompt:str, previous_discussion_text:str="", callback: Callable[[str | list | None, MSG_OPERATION_TYPE, str, AIPersonality| None], bool]=None, context_details:dict=None, client:Client=None):
def run_workflow(self, context_details:dict=None, client:Client=None, callback: Callable[[str | list | None, MSG_OPERATION_TYPE, str, AIPersonality| None], bool]=None):
"""
This function generates code based on the given parameters.
Args:
full_prompt (str): The full prompt for code generation.
prompt (str): The prompt for code generation.
context_details (dict): A dictionary containing the following context details for code generation:
- conditionning (str): The conditioning information.
- documentation (str): The documentation information.
@@ -3158,14 +3180,12 @@ Use this structure:
- current_language (str): The forced language information.
- fun_mode (str): The fun mode conditioning text
- ai_prefix (str): The AI prefix information.
n_predict (int): The number of predictions to generate.
client_id: The client ID for code generation.
callback (function, optional): The callback function for code generation.
Returns:
None
"""
return None
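The new signature drops the separate prompt and previous_discussion_text arguments; the user request now travels inside context_details (the "prompt" key added in the first file). A minimal hedged sketch of a processor override against the new signature; fast_gen is used here only as an illustrative generation helper and is an assumption, not something this commit defines:
def run_workflow(self, context_details: dict = None, client: Client = None, callback: Callable = None):
    # The raw user request is carried inside the context dict.
    prompt = context_details["prompt"]
    # Rebuild the full prompt from the same dict the application prepared.
    full_prompt = self.personality.build_context(context_details)
    # Illustrative generation call; substitute the processor's actual helper.
    answer = self.fast_gen(full_prompt, callback=callback)
    return answer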

View File

@@ -168,7 +168,7 @@ def add_events(sio:socketio):
full_discussion = personality.personality_conditioning + ''.join(full_discussion_blocks)
def callback(text, message_type: MSG_TYPE, metadata:dict={}):
def callback(text, message_type: MSG_OPERATION_TYPE, metadata:dict={}):
if message_type == MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_ADD_CHUNK:
lollmsElfServer.answer["full_text"] = lollmsElfServer.answer["full_text"] + text
run_async(partial(lollmsElfServer.sio.emit,'text_chunk', {'chunk': text}, to=client_id))
@@ -189,7 +189,7 @@ def add_events(sio:socketio):
context_details = {
}
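# Illustrative note, not part of the commit: this dict is passed straight to the
# processor, and processors that hand it to AIPersonality.build_context expect at
# least the keys joined there. A hedged placeholder shaped after that method, with
# all values illustrative:
# context_details = {
#     "conditionning": personality.personality_conditioning,
#     "internet_search_results": "",
#     "documentation": "",
#     "knowledge": "",
#     "user_description": "",
#     "discussion_messages": fd,
#     "positive_boost": "",
#     "negative_boost": "",
#     "fun_mode": "",
#     "prompt": prompt,
# }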
generated_text = personality.processor.run_workflow(prompt, previous_discussion_text=personality.personality_conditioning+fd, callback=callback, context_details=context_details, client=client)
generated_text = personality.processor.run_workflow(context_details, client=client, callback=callback)
else:
ASCIIColors.info("generating...")
generated_text = personality.model.generate(

View File

@@ -124,9 +124,9 @@ class LollmsDiffusers(LollmsTTI):
shared_folder = root_dir/"shared"
self.diffusers_folder = shared_folder / "diffusers"
self.output_dir = root_dir / "outputs/diffusers"
self.models_dir = self.diffusers_folder / "models"
self.tti_models_dir = self.diffusers_folder / "models"
self.output_dir.mkdir(parents=True, exist_ok=True)
self.models_dir.mkdir(parents=True, exist_ok=True)
self.tti_models_dir.mkdir(parents=True, exist_ok=True)
ASCIIColors.red("")
ASCIIColors.red(" _ _ _ _ _ __ __ ")
@@ -148,32 +148,34 @@ class LollmsDiffusers(LollmsTTI):
try:
if "stable-diffusion-3" in app.config.diffusers_model:
from diffusers import StableDiffusion3Pipeline # AutoPipelineForImage2Image#PixArtSigmaPipeline
self.model = StableDiffusion3Pipeline.from_pretrained(
app.config.diffusers_model, torch_dtype=torch.float16, cache_dir=self.models_dir,
self.tti_model = StableDiffusion3Pipeline.from_pretrained(
app.config.diffusers_model, torch_dtype=torch.float16, cache_dir=self.tti_models_dir,
use_safetensors=True,
)
self.iti_model = None
else:
from diffusers import AutoPipelineForText2Image # AutoPipelineForImage2Image#PixArtSigmaPipeline
self.model = AutoPipelineForText2Image.from_pretrained(
app.config.diffusers_model, torch_dtype=torch.float16, cache_dir=self.models_dir,
self.tti_model = AutoPipelineForText2Image.from_pretrained(
app.config.diffusers_model, torch_dtype=torch.float16, cache_dir=self.tti_models_dir,
use_safetensors=True,
)
self.iti_model = None
# AutoPipelineForText2Image
# self.model = StableDiffusionPipeline.from_pretrained(
# "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, cache_dir=self.models_dir,
# self.tti_model = StableDiffusionPipeline.from_pretrained(
# "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, cache_dir=self.tti_models_dir,
# use_safetensors=True,
# ) # app.config.diffusers_model
# Enable memory optimizations.
try:
if app.config.diffusers_offloading_mode=="sequential_cpu_offload":
self.model.enable_sequential_cpu_offload()
self.tti_model.enable_sequential_cpu_offload()
elif app.config.diffusers_offloading_mode=="model_cpu_offload":
self.model.enable_model_cpu_offload()
self.tti_model.enable_model_cpu_offload()
except:
pass
except Exception as ex:
self.model= None
self.tti_model= None
trace_exception(ex)
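# Illustrative note, not part of the commit: diffusers_offloading_mode selects between
# the two diffusers offloading helpers used above. As a rule of thumb,
# "sequential_cpu_offload" minimizes VRAM usage at the cost of speed, "model_cpu_offload"
# saves less memory but runs faster, and any other value skips offloading here.
# Example configuration entry (illustrative):
# diffusers_offloading_mode: "sequential_cpu_offload"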
@staticmethod
def verify(app:LollmsApplication):
@@ -236,7 +238,7 @@ class LollmsDiffusers(LollmsTTI):
if sampler_name!="":
sc = self.get_scheduler_by_name(sampler_name)
if sc:
self.model.scheduler = sc
self.tti_model.scheduler = sc
width = adjust_dimensions(int(width))
height = adjust_dimensions(int(height))
@@ -266,15 +268,15 @@ class LollmsDiffusers(LollmsTTI):
if seed!=-1:
generator = torch.Generator("cuda").manual_seed(seed)
image = self.model(positive_prompt, negative_prompt=negative_prompt, height=height, width=width, guidance_scale=scale, num_inference_steps=steps, generator=generator).images[0]
image = self.tti_model(positive_prompt, negative_prompt=negative_prompt, height=height, width=width, guidance_scale=scale, num_inference_steps=steps, generator=generator).images[0]
else:
image = self.model(positive_prompt, negative_prompt=negative_prompt, height=height, width=width, guidance_scale=scale, num_inference_steps=steps).images[0]
image = self.tti_model(positive_prompt, negative_prompt=negative_prompt, height=height, width=width, guidance_scale=scale, num_inference_steps=steps).images[0]
# Save the image
image.save(fn)
return fn, {"prompt":positive_prompt, "negative_prompt":negative_prompt}
def paint_from_images(self, positive_prompt: str,
images: List[str],
image: str,
negative_prompt: str = "",
sampler_name="",
seed=-1,
@@ -287,18 +289,27 @@ class LollmsDiffusers(LollmsTTI):
output_path=None
) -> List[Dict[str, str]]:
import torch
from diffusers.utils import make_image_grid, load_image
if not self.iti_model:
from diffusers import AutoPipelineForImage2Image
self.iti_model = AutoPipelineForImage2Image.from_pretrained(
self.app.config.diffusers_model, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
if sampler_name!="":
sc = self.get_scheduler_by_name(sampler_name)
if sc:
self.model.scheduler = sc
self.iti_model.scheduler = sc
img = load_image(image)
if output_path is None:
output_path = self.output_dir
if seed!=-1:
generator = torch.Generator("cuda").manual_seed(seed)
image = self.model(positive_prompt, negative_prompt=negative_prompt, height=height, width=width, guidance_scale=scale, num_inference_steps=steps, generator=generator).images[0]
image = self.iti_model(positive_prompt,image=img, negative_prompt=negative_prompt, height=height, width=width, guidance_scale=scale, num_inference_steps=steps, generator=generator).images[0]
else:
image = self.model(positive_prompt, negative_prompt=negative_prompt, height=height, width=width, guidance_scale=scale, num_inference_steps=steps).images[0]
image = self.iti_model(positive_prompt,image=img, negative_prompt=negative_prompt, height=height, width=width, guidance_scale=scale, num_inference_steps=steps).images[0]
output_path = Path(output_path)
fn = find_next_available_filename(output_path,"diff_img_")
# Save the image
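# Hedged usage sketch, not part of the commit. The binding now keeps two pipelines:
# tti_model for text-to-image and a lazily created iti_model for image-to-image.
# The text-to-image entry point is assumed to be paint(); constructor arguments and
# keyword names not shown in the hunks above are assumptions as well.
tti = LollmsDiffusers(app)
fn, info = tti.paint("a watercolor lighthouse at dusk", negative_prompt="blurry, low quality")
result = tti.paint_from_images("repaint it as an oil painting", image=str(fn))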