Saifeddine ALOUI 2024-06-05 01:21:49 +02:00
parent 2358631078
commit cf7c1e2d88


@@ -419,29 +419,29 @@ class LollmsApplication(LoLLMsCom):
trace_exception(ex)
self.warning(f"Couldn't load Motion control")
if self.config.active_tti_service == "diffusers" and (self.tti is None or type(self.tti.name)!="diffusers"):
if self.config.active_tti_service == "diffusers" and (self.tti is None or self.tti.name!="diffusers"):
from lollms.services.diffusers.lollms_diffusers import LollmsDiffusers
self.tti = LollmsDiffusers(self)
elif self.config.active_tti_service == "autosd" and (self.tti is None or type(self.tti.name)!="stable_diffusion"):
elif self.config.active_tti_service == "autosd" and (self.tti is None or self.tti.name!="stable_diffusion"):
from lollms.services.sd.lollms_sd import LollmsSD
self.tti = LollmsSD(self)
elif self.config.active_tti_service == "dall-e" and (self.tti is None or type(self.tti.name)!="dall-e-2" or type(self.tti.name)!="dall-e-3"):
elif self.config.active_tti_service == "dall-e" and (self.tti is None or self.tti.name!="dall-e-2" or type(self.tti.name)!="dall-e-3"):
from lollms.services.dalle.lollms_dalle import LollmsDalle
self.tti = LollmsDalle(self, self.config.dall_e_key)
elif self.config.active_tti_service == "midjourney" and (self.tti is None or type(self.tti.name)!="midjourney"):
elif self.config.active_tti_service == "midjourney" and (self.tti is None or self.tti.name!="midjourney"):
from lollms.services.midjourney.lollms_midjourney import LollmsMidjourney
self.tti = LollmsMidjourney(self, self.config.midjourney_key)
if self.config.active_tts_service == "openai_tts" and (self.tts is None or type(self.tts.name)!="openai_tts"):
if self.config.active_tts_service == "openai_tts" and (self.tts is None or self.tts.name!="openai_tts"):
from lollms.services.open_ai_tts.lollms_openai_tts import LollmsOpenAITTS
self.tts = LollmsOpenAITTS(self, self.config.openai_tts_model, self.config.openai_tts_voice, self.config.openai_tts_key)
elif self.config.active_tts_service == "xtts" and self.xtts:
self.tts = self.xtts
if self.config.active_stt_service == "openai_whisper" and (self.tts is None or type(self.tts.name)!="openai_whisper"):
if self.config.active_stt_service == "openai_whisper" and (self.tts is None or self.tts.name!="openai_whisper"):
from lollms.services.openai_whisper.lollms_openai_whisper import LollmsOpenAIWhisper
self.stt = LollmsOpenAIWhisper(self, self.config.openai_whisper_model, self.config.openai_whisper_key)
elif self.config.active_stt_service == "whisper" and (self.tts is None or type(self.tts.name)!="whisper") :
elif self.config.active_stt_service == "whisper" and (self.tts is None or self.tts.name!="whisper") :
from lollms.services.whisper.lollms_whisper import LollmsWhisper
self.stt = LollmsWhisper(self, self.config.whisper_model)
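Note on the hunk above: the original guards compared `type(self.tti.name)` (a `type` object, always `<class 'str'>`) against a string literal, so the comparison was always unequal and the service was re-instantiated on every call, even when the right one was already loaded. The fix compares the name attribute itself. A minimal, runnable sketch of the bug class, using a hypothetical `Service` class rather than the actual lollms types:

```python
# Hypothetical stand-in for a lollms service object, for illustration only.
class Service:
    def __init__(self, name: str):
        self.name = name

svc = Service("diffusers")

# Buggy guard: type(svc.name) is <class 'str'>, which never equals the
# string "diffusers", so the branch is taken even when the correct
# service is already loaded.
if svc is None or type(svc.name) != "diffusers":
    print("rebuild service (always taken)")

# Fixed guard: compare the name attribute directly.
if svc is None or svc.name != "diffusers":
    print("rebuild service")
else:
    print("service already loaded, reuse it")
```

For guards that accept several names, such as the dall-e branch, a membership test like `self.tti.name not in ["dall-e-2", "dall-e-3"]` avoids the always-true condition that a chain of `!=` comparisons joined with `or` produces.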
@@ -1055,7 +1055,6 @@ class LollmsApplication(LoLLMsCom):
# Initialize a list to store the full messages
full_message_list = []
full_message = ""
# If this is not a continue request, we add the AI prompt
if not is_continue:
message_tokenized = self.model.tokenize(
@@ -1064,7 +1063,6 @@ class LollmsApplication(LoLLMsCom):
full_message_list.append(message_tokenized)
# Update the cumulative number of tokens
tokens_accumulated += len(message_tokenized)
full_message += self.personality.ai_message_prefix.strip()
if generation_type != "simple_question":
@@ -1089,12 +1087,11 @@ class LollmsApplication(LoLLMsCom):
msg_value
)
# Check if adding the message will exceed the available space
- if tokens_accumulated + len(message_tokenized) > available_space-n_tokens:
+ if tokens_accumulated + len(message_tokenized) > available_space:
# Update the cumulative number of tokens
- msg = message_tokenized[-(available_space-tokens_accumulated-n_tokens):]
- tokens_accumulated += available_space-tokens_accumulated-n_tokens
+ msg = message_tokenized[-(available_space-tokens_accumulated):]
+ tokens_accumulated += available_space-tokens_accumulated
full_message_list.insert(0, msg)
full_message = self.personality.ai_message_prefix.strip()+full_message
break
# Add the tokenized message to the full_message_list
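Note on the hunk above: `available_space` appears to already exclude the `n_tokens` reserved for generation, so subtracting `n_tokens` again in the overflow check and in the slice shrank the usable context twice; the commit drops the second subtraction. Also, `tokens_accumulated += available_space - tokens_accumulated` is simply `tokens_accumulated = available_space`. A hedged, standalone sketch of the resulting truncation loop (plain lists stand in for tokenized messages; the names are illustrative, not the actual lollms signatures). The `remaining > 0` guard avoids the Python quirk that `lst[-0:]` returns the whole list, an edge case the original slice expression shares:

```python
def build_context(messages, available_space):
    """Accumulate messages newest-first until the token budget is spent,
    clipping the oldest surviving message to its most recent tokens."""
    tokens_accumulated = 0
    full_message_list = []
    for message_tokenized in reversed(messages):
        # Check if adding the message would exceed the available space.
        if tokens_accumulated + len(message_tokenized) > available_space:
            remaining = available_space - tokens_accumulated
            if remaining > 0:
                # Keep only the most recent tokens that still fit.
                full_message_list.insert(0, message_tokenized[-remaining:])
                tokens_accumulated = available_space
            break
        # The whole message fits: prepend it and update the budget.
        full_message_list.insert(0, message_tokenized)
        tokens_accumulated += len(message_tokenized)
    return full_message_list, tokens_accumulated

# Budget of 6 "tokens" over messages of lengths 3, 3, 2: the two newest
# messages fit whole, the oldest is clipped to its last token.
msgs = [[1, 2, 3], [4, 5, 6], [7, 8]]
print(build_context(msgs, 6))  # ([[3], [4, 5, 6], [7, 8]], 6)
```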