Mirror of https://github.com/ParisNeo/lollms-webui.git (synced 2025-02-11 13:05:22 +00:00)
moved to threading instead of threadpools

commit 79426ce9d0 (parent 917600f412)
app.py (15 changed lines)
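For orientation before the hunks: the commit swaps a single-worker ThreadPoolExecutor for a plain threading.Thread when launching one background generation job. A minimal sketch of the two patterns, assuming a generate_message function as in the diff; the join/shutdown calls are illustrative, while the actual code below polls a flag instead of blocking:

import threading
from concurrent.futures import ThreadPoolExecutor

def generate_message():
    # Stand-in for the app's model-generation routine.
    print("generating...")

# Before this commit: a single-worker pool created per request.
executor = ThreadPoolExecutor(max_workers=1)
executor.submit(generate_message)
executor.shutdown(wait=True)  # blocks until the job finishes

# After this commit: one dedicated thread per generation request.
tpe = threading.Thread(target=generate_message)
tpe.start()
tpe.join()  # wait for completion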
@@ -20,7 +20,7 @@ import argparse
 import json
 import re
 import traceback
-from concurrent.futures import ThreadPoolExecutor
+import threading
 import sys
 from pyGpt4All.db import DiscussionsDB, Discussion
 from flask import (
@@ -263,8 +263,10 @@ class Gpt4AllWebUI(GPT4AllAPI):
         self.discussion_messages = self.prepare_query(message_id)
         self.prepare_reception()
         self.generating = True
-        app.config['executor'] = ThreadPoolExecutor(max_workers=1)
-        app.config['executor'].submit(self.generate_message)
+        # app.config['executor'] = ThreadPoolExecutor(max_workers=1)
+        # app.config['executor'].submit(self.generate_message)
+        tpe = threading.Thread(target=self.generate_message)
+        tpe.start()
         while self.generating:
             try:
                 while not self.text_queue.empty():
@@ -279,7 +281,8 @@ class Gpt4AllWebUI(GPT4AllAPI):
                 time.sleep(0.1)
             if self.cancel_gen:
                 self.generating = False
-                app.config['executor'].shutdown(True)
+                tpe = None
+                gc.collect()
         print("## Done ##")
         self.current_discussion.update_message(response_id, self.bot_says)
         self.full_message_list.append(self.bot_says)
@@ -583,8 +586,8 @@ if __name__ == "__main__":
 
     personality = load_config(f"personalities/{config['personality_language']}/{config['personality_category']}/{config['personality']}.yaml")
 
-    executor = ThreadPoolExecutor(max_workers=1)
-    app.config['executor'] = executor
+    # executor = ThreadPoolExecutor(max_workers=1)
+    # app.config['executor'] = executor
 
     bot = Gpt4AllWebUI(app, config, personality, config_file_path)
 
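Taken together, the hunks at -263 and -279 form a simple producer/consumer loop: the worker thread pushes generated text into a queue while the request handler drains it until the generating flag drops, and on cancellation the thread reference is dropped and a garbage-collection pass is forced instead of shutting a pool down. A self-contained sketch of that pattern, assuming a queue.Queue named text_queue as in the diff; the chunk data and module-level layout are illustrative:

import gc
import queue
import threading
import time

text_queue: queue.Queue = queue.Queue()
generating = True
cancel_gen = False

def generate_message():
    global generating
    for chunk in ["Hello", ", ", "world", "!"]:
        text_queue.put(chunk)   # producer: push text as it is generated
        time.sleep(0.05)
    generating = False          # signal the polling loop to stop

tpe = threading.Thread(target=generate_message)
tpe.start()

while generating:
    try:
        while not text_queue.empty():
            print(text_queue.get(), end="", flush=True)  # consumer: drain the queue
        time.sleep(0.1)
        if cancel_gen:
            generating = False
            tpe = None   # drop the reference; note this does not stop the thread itself
            gc.collect()
    except Exception:
        break
print("\n## Done ##")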
@@ -50,6 +50,7 @@ class LLAMACPP(GPTBackend):
             new_text_callback (Callable[[str], None], optional): A callback function that is called everytime a new text element is generated. Defaults to None.
             verbose (bool, optional): If true, the code will spit many informations about the generation process. Defaults to False.
         """
+        try:
             self.model.generate(
                 prompt,
                 new_text_callback=new_text_callback,
@@ -62,3 +63,5 @@ class LLAMACPP(GPTBackend):
                 n_threads=self.config['n_threads'],
                 verbose=verbose
             )
+        except Exception as ex:
+            print(ex)
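These backend hunks (in the LLAMACPP backend; the file path is not shown in this capture) wrap the model.generate call in a try/except so that a backend failure is printed rather than allowed to escape the worker thread, which would otherwise leave the generating flag set and the polling loop spinning. A minimal sketch of that defensive pattern; the run_generation and on_done names are hypothetical, and the finally clause is a safeguard added in this sketch, not part of the commit:

def run_generation(model, prompt, new_text_callback, on_done):
    # Hypothetical wrapper illustrating the pattern: report failures
    # instead of letting them escape the worker thread.
    try:
        model.generate(prompt, new_text_callback=new_text_callback)
    except Exception as ex:
        print(ex)    # mirror the diff: log and swallow the error
    finally:
        on_done()    # e.g. clear the generating flag (sketch-only addition)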
|
Loading…
x
Reference in New Issue
Block a user