Mirror of https://github.com/ParisNeo/lollms-webui.git, synced 2025-04-13 22:02:58 +00:00
Upgraded code

parent f201e1bd8a
commit 2208010bd5
@@ -87,9 +87,12 @@ class ModelProcess:
        self.clear_queue_queue = mp.Queue(maxsize=1)
        self.set_config_queue = mp.Queue(maxsize=1)
        self.set_config_result_queue = mp.Queue(maxsize=1)
        self.started_queue = mp.Queue()

        self.process = None
        self.is_generating = mp.Value('i', 0)
        # Create synchronization objects
        self.start_signal = mp.Event()
        self.completion_signal = mp.Event()

        self.model_ready = mp.Value('i', 0)
        self.curent_text = ""
        self.ready = False
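These queues and events implement a classic parent/worker handshake over multiprocessing: the parent posts a request, the worker raises start_signal when generation begins, streams chunks through a queue, and raises completion_signal when done. Below is a minimal, self-contained sketch of that pattern, not the actual lollms-webui code; the worker body and the streamed strings are invented for illustration.

    import multiprocessing as mp
    import queue
    import time

    def worker(task_queue, generation_queue, start_signal, completion_signal):
        # Hypothetical worker loop illustrating the handshake; not lollms code.
        prompt = task_queue.get()
        completion_signal.clear()
        start_signal.set()                 # generation has started
        for word in ("streamed", "chunks", "go", "here"):
            generation_queue.put(word)     # stream partial output to the parent
            time.sleep(0.1)
        completion_signal.set()            # generation is finished

    if __name__ == "__main__":
        task_q, gen_q = mp.Queue(), mp.Queue()
        start, done = mp.Event(), mp.Event()
        p = mp.Process(target=worker, args=(task_q, gen_q, start, done))
        p.start()
        task_q.put("hello")
        start.wait()                       # same role as started_queue/start_signal
        # Same drain condition the commit introduces: keep reading until the
        # worker signals completion AND the queue is empty.
        while not done.is_set() or not gen_q.empty():
            try:
                print(gen_q.get(timeout=0.5))
            except queue.Empty:
                pass
        p.join()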
@@ -107,6 +110,24 @@ class ModelProcess:
             'errors':[]
         }
 
+    def remove_text_from_string(self, string, text_to_find):
+        """
+        Removes everything from the first occurrence of the specified text in the string (case-insensitive).
+
+        Parameters:
+            string (str): The original string.
+            text_to_find (str): The text to find in the string.
+
+        Returns:
+            str: The updated string.
+        """
+        index = string.lower().find(text_to_find.lower())
+
+        if index != -1:
+            string = string[:index]
+
+        return string
+
     def load_binding(self, binding_name:str, install=False):
         if install:
             print(f"Loading binding {binding_name} install ON")
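The helper added above cuts the string at the first case-insensitive match, which is how the commit strips anti-prompts from streamed output. A standalone copy, run against a hypothetical example string (not from the repo), behaves like this:

    def remove_text_from_string(string, text_to_find):
        # Standalone copy of the helper above, for demonstration only.
        index = string.lower().find(text_to_find.lower())
        if index != -1:
            string = string[:index]
        return string

    print(remove_text_from_string("Blue is calming.### Human: and red?", "### human"))
    # -> "Blue is calming."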
@@ -166,10 +187,13 @@ class ModelProcess:
        return self.set_config_result_queue.get()

    def generate(self, full_prompt, prompt, id, n_predict):
        self.start_signal.clear()
        self.generate_queue.put((full_prompt, prompt, id, n_predict))

    def cancel_generation(self):
        self.completion_signal.set()
        self.cancel_queue.put(('cancel',))
        print("Cancel request received")

    def clear_queue(self):
        self.clear_queue_queue.put(('clear_queue',))
@@ -279,8 +303,6 @@ class ModelProcess:
                command = self.generate_queue.get()
                if command is not None:
                    if self.cancel_queue.empty() and self.clear_queue_queue.empty():
                        self.is_generating.value = 1
                        self.started_queue.put(1)
                        self.id=command[2]
                        self.n_predict=command[3]
                        if self.personality.processor is not None:
@@ -288,15 +310,18 @@ class ModelProcess:
                            if "custom_workflow" in self.personality.processor_cfg:
                                if self.personality.processor_cfg["custom_workflow"]:
                                    print("Running workflow")
                                    self.completion_signal.clear()
                                    self.start_signal.set()
                                    output = self.personality.processor.run_workflow(self._generate, command[1], command[0], self._callback)
                                    self._callback(output, 0)
                                    self.is_generating.value = 0
                                    self.completion_signal.set()
                                    print("Finished executing the workflow")
                                    continue

                        self.start_signal.set()
                        self.completion_signal.clear()
                        self._generate(command[0], self.n_predict, self._callback)
                        while not self.generation_queue.empty():
                            time.sleep(1)
                        self.is_generating.value = 0
                        self.completion_signal.set()
                        print("Finished executing the generation")
            except Exception as ex:
                print(ex)
                time.sleep(1)
@@ -357,14 +382,14 @@ class ModelProcess:
             # Stream the generated text to the main process
             self.generation_queue.put((text,self.id, text_type))
             # if stop generation is detected then stop
-            if self.is_generating.value==1:
+            if self.completion_signal.is_set():
                 return True
             else:
                 return False
         else:
             self.curent_text = self.remove_text_from_string(self.curent_text, anti_prompt_to_remove)
             self._cancel_generation()
             print("The model is hallucinating")
             return False


     def _check_cancel_queue(self):
@@ -391,7 +416,7 @@ class ModelProcess:
        self.set_config_result_queue.put(self._set_config_result)

    def _cancel_generation(self):
        self.is_generating.value = 0
        self.completion_signal.set()

    def _clear_queue(self):
        while not self.generate_queue.empty():
@@ -679,24 +704,6 @@ class GPT4AllAPI():
         return discussion_messages # Removes the last return


-    def remove_text_from_string(self, string, text_to_find):
-        """
-        Removes everything from the first occurrence of the specified text in the string (case-insensitive).
-
-        Parameters:
-            string (str): The original string.
-            text_to_find (str): The text to find in the string.
-
-        Returns:
-            str: The updated string.
-        """
-        index = string.lower().find(text_to_find.lower())
-
-        if index != -1:
-            string = string[:index]
-
-        return string
-
     def process_chunk(self, chunk, message_type):
         self.bot_says += chunk
         self.socketio.emit('message', {
@@ -739,15 +746,14 @@ class GPT4AllAPI():
         # prepare query and reception
         self.discussion_messages, self.current_message = self.prepare_query(message_id)
         self.prepare_reception()
-        self.generating = True
         self.generating = True
         self.process.generate(self.discussion_messages, self.current_message, message_id, n_predict = self.config['n_predict'])
         self.process.started_queue.get()
-        while(self.process.is_generating.value or not self.process.generation_queue.empty()): # Simulating other commands being issued
+        while(not self.process.completion_signal.is_set() or not self.process.generation_queue.empty()): # Simulating other commands being issued
             try:
                 chunk, tok, message_type = self.process.generation_queue.get(False, 2)
                 if chunk!="":
                     self.process_chunk(chunk, message_type)
-            except:
+            except Exception as ex:
                 time.sleep(0.1)

         print()
4 app.py
@@ -270,7 +270,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
         for personality_folder in category_folder.iterdir():
             if personality_folder.is_dir():
                 try:
-                    personality_info = {}
+                    personality_info = {"folder":personality_folder.stem}
                     config_path = personality_folder / 'config.yaml'
                     with open(config_path) as config_file:
                         config_data = yaml.load(config_file, Loader=yaml.FullLoader)
@@ -624,7 +624,7 @@ class Gpt4AllWebUI(GPT4AllAPI):


     def get_generation_status(self):
-        return jsonify({"status":self.process.is_generating.value==1})
+        return jsonify({"status":self.process.start_signal.is_set()})

     def stop_gen(self):
         self.cancel_gen = True
@@ -140,4 +140,13 @@ Oh, and let's not forget! You can even set the number of images to generate for
 
 Feel free to explore and make tweaks according to your preferences. It's your playground now!
 
+Now, let's delve into another fascinating personality: the Tree of Thoughts personality. This particular personality has gained quite a reputation and is highly sought after. Its main goal is to explore a technique for enhancing AI reasoning.
+
+The Tree of Thoughts technique takes an innovative approach by breaking down an answer into multiple thoughts. Here's how it works: the AI generates a number of thoughts related to the question or prompt. Then, it evaluates and assesses these thoughts, ultimately selecting the best one. This chosen thought is then incorporated into the AI's previous context, prompting it to generate another thought. This iterative process repeats multiple times.
+
+By the end of this dynamic thought generation process, the AI accumulates a collection of the best thoughts it has generated. These thoughts are then synthesized into a comprehensive summary, which is presented to the user as a result. It's a remarkable way to tackle complex subjects and generate creative solutions.
+
+Now, let's put this Tree of Thoughts personality to the test with a challenging topic: finding solutions to the climate change problem. Brace yourself for an engaging and thought-provoking exploration of ideas and potential strategies.
+
+Here are the
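The workflow described in that documentation maps naturally onto a short loop. Here is a minimal Python sketch of the generate/evaluate/select cycle it describes; generate_fn and score_fn are hypothetical stand-ins for the model call and the thought evaluator, not functions from lollms-webui.

    def tree_of_thoughts(generate_fn, score_fn, question, n_thoughts=3, depth=3):
        # generate_fn(prompt) -> str and score_fn(thought) -> float are
        # hypothetical placeholders for the model call and the evaluator.
        context = question
        best_thoughts = []
        for _ in range(depth):
            # Generate several candidate thoughts from the current context.
            candidates = [generate_fn(context) for _ in range(n_thoughts)]
            # Evaluate the candidates and keep the best one.
            best = max(candidates, key=score_fn)
            best_thoughts.append(best)
            # Fold the winning thought back into the context and iterate.
            context = context + "\n" + best
        # Synthesize the accumulated best thoughts into a final summary.
        return generate_fn("Summarize these thoughts:\n" + "\n".join(best_thoughts))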
File diff suppressed because one or more lines are too long
4 web/dist/index.html vendored
@@ -6,8 +6,8 @@
 
     <meta name="viewport" content="width=device-width, initial-scale=1.0">
     <title>GPT4All - WEBUI</title>
-    <script type="module" crossorigin src="/assets/index-4ec4d9d4.js"></script>
-    <link rel="stylesheet" href="/assets/index-99ba4d99.css">
+    <script type="module" crossorigin src="/assets/index-0e9c5983.js"></script>
+    <link rel="stylesheet" href="/assets/index-c9ce79a4.css">
   </head>
   <body>
     <div id="app"></div>
@@ -599,7 +599,7 @@ export default {
                 if (pers.personality) {
 
                     this.settingsChanged = true
-                    const res = this.update_setting('personality', pers.personality.name, () => {
+                    const res = this.update_setting('personality', pers.personality.folder, () => {
                         this.$refs.toast.showToast("Selected personality:\n" + pers.personality.name, 4, true)
                         this.configFile.personality = pers.personality.name
                         this.configFile.personality_category = pers.personality.category