upgraded. Added fun mode

Author: Saifeddine ALOUI
Date: 2024-01-12 00:24:43 +01:00
Parent: 3540a83145
Commit: d4b031942a
11 changed files with 211 additions and 4060 deletions

.gitignore

@@ -212,4 +212,6 @@ output_*
mPLUG-Owl
xtts_models
xtts_models
models.txt


@@ -1,5 +1,5 @@
# =================== Lord Of Large Language Models Configuration file ===========================
-version: 41
+version: 42
binding_name: null
model_name: null
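The version bump to 42 accompanies the new fun_mode setting that the server reads as self.config.fun_mode and the settings page binds as configFile.fun_mode. As a rough sketch only (not the project's actual config loader, and the file path is just an example), a loader could default the key to off for files written before version 42:

```python
import yaml

def load_config(path: str) -> dict:
    """Parse a lollms-style YAML config file (illustrative helper, not repo code)."""
    with open(path, "r", encoding="utf-8") as f:
        cfg = yaml.safe_load(f) or {}
    # Files written before version 42 have no fun_mode key, so default it to disabled.
    cfg.setdefault("fun_mode", False)
    return cfg

# Example (hypothetical path): load_config("config.yaml")["fun_mode"] -> False unless enabled
```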

@@ -1 +1 @@
-Subproject commit 9aa7dbc1f33432b6e38deb54db2bd2b03b0a477d
+Subproject commit 7f4de0e92f00ff16e2f954bd8477d44d35bf0d47


@@ -749,6 +749,14 @@ class LOLLMSWebUI(LOLLMSElfServer):
force_language=""
n_force_language = 0
+if self.config.fun_mode:
+    fun_mode="\n!@>important information: Fun mode activated. Don't forget to sprinkle some fun in the output.\n"
+    n_fun_mode = len(self.model.tokenize(fun_mode))
+else:
+    fun_mode=""
+    n_fun_mode = 0
if generation_type != "simple_question":
if self.personality.persona_data_vectorizer:
if documentation=="":
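In isolation, the new branch builds one extra system-style instruction (using the same !@> prefix as the other injected blocks) and counts its tokens so that the context budget further down stays accurate. Below is a minimal, self-contained sketch under assumed stand-ins for self.config and self.model; the class and function names are illustrative, not part of the codebase:

```python
class ToyModel:
    """Stand-in for self.model; the real tokenizer belongs to the loaded binding."""
    def tokenize(self, text: str) -> list:
        # Crude whitespace split, only to make the sketch runnable.
        return text.split()

def build_fun_mode(fun_enabled: bool, model: ToyModel):
    if fun_enabled:
        fun_mode = "\n!@>important information: Fun mode activated. Don't forget to sprinkle some fun in the output.\n"
        # Count the tokens of the injected text itself so it is budgeted correctly.
        n_fun_mode = len(model.tokenize(fun_mode))
    else:
        fun_mode = ""
        n_fun_mode = 0
    return fun_mode, n_fun_mode

fun_mode, n_fun_mode = build_fun_mode(True, ToyModel())
print(n_fun_mode)  # 14 with this toy whitespace split
```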
@@ -832,7 +840,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
# Calculate the total number of tokens between conditionning, documentation, and knowledge
-total_tokens = n_cond_tk + n_doc_tk + n_history_tk + n_user_description_tk + n_positive_boost + n_negative_boost + n_force_language
+total_tokens = n_cond_tk + n_doc_tk + n_history_tk + n_user_description_tk + n_positive_boost + n_negative_boost + n_force_language + n_fun_mode
# Calculate the available space for the messages
available_space = self.config.ctx_size - n_tokens - total_tokens
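The budgeting step itself is plain arithmetic: every fixed block is counted once, summed, and subtracted (together with the space reserved for the answer) from the context size, and whatever remains is what the discussion history may occupy. Without adding n_fun_mode here, the final prompt could overshoot the context window by exactly the size of the injected note. A toy calculation with made-up token counts:

```python
ctx_size = 4096      # model context window (config.ctx_size)
n_tokens = 1024      # tokens reserved for the generated answer

# Invented counts, only to illustrate the subtraction.
fixed_blocks = {
    "conditionning": 120, "documentation": 600, "history": 0,
    "user_description": 40, "positive_boost": 12, "negative_boost": 0,
    "force_language": 0, "fun_mode": 14,   # the new block has to be counted too
}
total_tokens = sum(fixed_blocks.values())
available_space = ctx_size - n_tokens - total_tokens
print(available_space)   # 2286 tokens left for discussion messages
```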
@@ -909,7 +917,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
ai_prefix = self.model.detokenize(full_message_list[-1])
# Build the final prompt by concatenating the conditionning and discussion messages
-prompt_data = conditionning + documentation + knowledge + user_description + discussion_messages + positive_boost + negative_boost + force_language + ai_prefix
+prompt_data = conditionning + documentation + knowledge + user_description + discussion_messages + positive_boost + negative_boost + force_language + fun_mode + ai_prefix
# Tokenize the prompt data
tokens = self.model.tokenize(prompt_data)
@@ -941,6 +949,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
"positive_boost":positive_boost,
"negative_boost":negative_boost,
"force_language":force_language,
"fun_mode":fun_mode,
"ai_prefix":ai_prefix
}
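The injected text ends up in two places: concatenated into the prompt just before the AI prefix, and echoed back in the context dictionary so whatever consumes that dictionary also sees what was added. A rough sketch with placeholder strings, mirroring the order shown in the change above:

```python
# Placeholder pieces; in the server these are produced by the steps above.
conditionning = "!@>system: You are a helpful assistant.\n"
documentation = knowledge = user_description = ""
discussion_messages = "!@>user: tell me a joke\n"
positive_boost = negative_boost = force_language = ""
fun_mode = "\n!@>important information: Fun mode activated. Don't forget to sprinkle some fun in the output.\n"
ai_prefix = "!@>assistant:"

# fun_mode goes in right before the AI prefix, exactly as in the diff above.
prompt_data = (conditionning + documentation + knowledge + user_description
               + discussion_messages + positive_boost + negative_boost
               + force_language + fun_mode + ai_prefix)

context_details = {
    "positive_boost": positive_boost,
    "negative_boost": negative_boost,
    "force_language": force_language,
    "fun_mode": fun_mode,
    "ai_prefix": ai_prefix,
}
```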

Diffs for four additional files were suppressed because one or more lines are too long.

web/dist/index.html

@@ -6,8 +6,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LoLLMS WebUI - Welcome</title>
<script type="module" crossorigin src="/assets/index-ce5f22eb.js"></script>
<link rel="stylesheet" href="/assets/index-b4b20421.css">
<script type="module" crossorigin src="/assets/index-f92437ea.js"></script>
<link rel="stylesheet" href="/assets/index-048c2c80.css">
</head>
<body>
<div id="app"></div>


@@ -769,15 +769,15 @@
</tr>
<tr>
<td style="min-width: 200px;">
<label for="positive_boost" class="text-sm font-bold" style="margin-right: 1rem;">Positive Boost:</label>
<label for="fun_mode" class="text-sm font-bold" style="margin-right: 1rem;">Fun mode:</label>
</td>
<td>
<div class="flex flex-row">
<input
type="text"
id="positive_boost"
type="checkbox"
id="fun_mode"
required
v-model="configFile.positive_boost"
v-model="configFile.fun_mode"
@change="settingsChanged=true"
class="mt-1 px-2 py-1 border border-gray-300 rounded dark:bg-gray-600"
>

@@ -1 +1 @@
-Subproject commit 1bfd21219fea347efc3c90331e60c9f24efdc8c9
+Subproject commit 6556d91d535f170b82829b973b63c474f08b5280