Mirror of https://github.com/ParisNeo/lollms-webui.git (synced 2025-04-10 12:19:58 +00:00)
Moved to process-based code
This commit is contained in:
parent: 05fddbd804
commit: 4d409530ee
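The change moves model execution out of the web server process into a dedicated worker (the ModelProcess class added to pyGpt4All/api.py below) that exchanges prompts and streamed text over multiprocessing queues. A minimal sketch of that pattern, illustrative only: GenerationWorker and its word-echo loop are hypothetical stand-ins, not the actual ModelProcess API from this commit.

import multiprocessing as mp
import time

class GenerationWorker:
    """Hypothetical stand-in for a process-based model runner."""
    def __init__(self):
        self.requests = mp.Queue()           # prompts from the web process
        self.chunks = mp.Queue()             # streamed text back to it
        self.is_generating = mp.Value('i', 0)
        self.process = mp.Process(target=self._run)

    def start(self):
        self.process.start()

    def stop(self):
        self.requests.put(None)              # sentinel shuts the loop down
        self.process.join()

    def generate(self, prompt):
        self.requests.put(prompt)

    def _run(self):
        # Runs in the child process; a real worker would load the model here.
        while True:
            prompt = self.requests.get()
            if prompt is None:
                break
            self.is_generating.value = 1
            for word in prompt.split():      # stand-in for model.generate()
                self.chunks.put(word + " ")
            self.is_generating.value = 0

if __name__ == "__main__":
    worker = GenerationWorker()
    worker.start()
    worker.generate("streamed from another process")
    time.sleep(0.5)                          # give the child time to respond
    while not worker.chunks.empty():         # drain streamed chunks
        print(worker.chunks.get(), end="")
    print()
    worker.stop()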
app.py (239 lines changed)
@@ -20,7 +20,6 @@ import argparse
import json
import re
import traceback
import threading
import sys
from tqdm import tqdm
from pyaipersonality import AIPersonality
@@ -41,9 +40,12 @@ from geventwebsocket.handler import WebSocketHandler
from gevent.pywsgi import WSGIServer
import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)

app = Flask("GPT4All-WebUI", static_url_path="/static", static_folder="static")
socketio = SocketIO(app, cors_allowed_origins="*", async_mode='gevent', ping_timeout=30, ping_interval=15)
socketio = SocketIO(app, cors_allowed_origins="*", async_mode='gevent', ping_timeout=60, ping_interval=15)

app.config['SECRET_KEY'] = 'secret!'
# Set the logging level to WARNING or higher
@@ -61,12 +63,12 @@ import markdown


class Gpt4AllWebUI(GPT4AllAPI):
    def __init__(self, _app, _socketio, config:dict, personality:dict, config_file_path) -> None:
        super().__init__(config, personality, config_file_path)
    def __init__(self, _app, _socketio, config:dict, config_file_path) -> None:
        super().__init__(config, _socketio, config_file_path)

        self.app = _app
        self.cancel_gen = False
        self.socketio = _socketio


        if "use_new_ui" in self.config:
            if self.config["use_new_ui"]:
@@ -109,7 +111,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
        self.add_endpoint("/", "", self.index, methods=["GET"])
        self.add_endpoint("/<path:filename>", "serve_static", self.serve_static, methods=["GET"])
        self.add_endpoint("/personalities/<path:filename>", "serve_personalities", self.serve_personalities, methods=["GET"])



        self.add_endpoint("/export_discussion", "export_discussion", self.export_discussion, methods=["GET"])
        self.add_endpoint("/export", "export", self.export, methods=["GET"])
@@ -199,121 +201,6 @@ class Gpt4AllWebUI(GPT4AllAPI):
            "/get_current_personality", "get_current_personality", self.get_current_personality, methods=["GET"]
        )

        # =========================================================================================
        # Socket IO stuff
        # =========================================================================================
        @socketio.on('connect')
        def connect():
            print('Client connected')

        @socketio.on('disconnect')
        def disconnect():
            print('Client disconnected')

        @socketio.on('install_model')
        def install_model(data):
            model_path = data["path"]
            progress = 0
            installation_dir = Path(f'./models/{self.config["backend"]}/')
            filename = Path(model_path).name
            installation_path = installation_dir / filename
            print("Model install requested")
            print(f"Model path : {model_path}")

            if installation_path.exists():
                print("Error: Model already exists")
                data.installing = False
                socketio.emit('install_progress',{'status': 'failed', 'error': 'model already exists'})

            socketio.emit('install_progress',{'status': 'progress', 'progress': progress})

            response = requests.get(model_path, stream=True)
            file_size = int(response.headers.get('Content-Length'))
            downloaded_size = 0
            CHUNK_SIZE = 8192

            def download_chunk(url, start_byte, end_byte, fileobj):
                headers = {'Range': f'bytes={start_byte}-{end_byte}'}
                response = requests.get(url, headers=headers, stream=True)
                downloaded_bytes = 0

                for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
                    if chunk:
                        fileobj.seek(start_byte)
                        fileobj.write(chunk)
                        downloaded_bytes += len(chunk)
                        start_byte += len(chunk)

                return downloaded_bytes


            def download_file(url, file_path, num_threads=4):
                response = requests.head(url)
                file_size = int(response.headers.get('Content-Length'))
                chunk_size = file_size // num_threads
                progress = 0

                with open(file_path, 'wb') as fileobj:
                    with tqdm(total=file_size, unit='B', unit_scale=True, unit_divisor=1024) as pbar:
                        with ThreadPoolExecutor(max_workers=num_threads) as executor:
                            futures = []

                            for i in range(num_threads):
                                start_byte = i * chunk_size
                                end_byte = start_byte + chunk_size - 1 if i < num_threads - 1 else file_size - 1
                                futures.append(executor.submit(download_chunk, url, start_byte, end_byte, fileobj))

                            for future in tqdm(as_completed(futures), total=num_threads):
                                downloaded_bytes = future.result()
                                progress += downloaded_bytes
                                pbar.update(downloaded_bytes)
                                socketio.emit('install_progress', {'status': 'progress', 'progress': progress})

            # Usage example
            download_file(model_path, installation_path, num_threads=4)

            socketio.emit('install_progress',{'status': 'succeeded', 'error': ''})

        @socketio.on('uninstall_model')
        def uninstall_model(data):
            model_path = data['path']
            installation_dir = Path(f'./models/{self.config["backend"]}/')
            filename = Path(model_path).name
            installation_path = installation_dir / filename

            if not installation_path.exists():
                socketio.emit('install_progress',{'status': 'failed', 'error': ''})

            installation_path.unlink()
            socketio.emit('install_progress',{'status': 'succeeded', 'error': ''})



        @socketio.on('generate_msg')
        def generate_msg(data):
            if self.current_discussion is None:
                if self.db.does_last_discussion_have_messages():
                    self.current_discussion = self.db.create_discussion()
                else:
                    self.current_discussion = self.db.load_last_discussion()

            message = data["prompt"]
            message_id = self.current_discussion.add_message(
                "user", message, parent=self.message_id
            )

            self.current_user_message_id = message_id
            tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))
            tpe.start()

        @socketio.on('generate_msg_from')
        def handle_connection(data):
            message_id = int(data['id'])
            message = data["prompt"]
            self.current_user_message_id = message_id
            tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))
            tpe.start()



    def save_settings(self):
@@ -405,9 +292,9 @@ class Gpt4AllWebUI(GPT4AllAPI):

        elif setting_name== "model":
            self.config["model"]=data['setting_value']
            print("New model selected")
            print("update_settings : New model selected")
            # Build chatbot
            self.chatbot_bindings = self.create_chatbot()
            self.process.set_config(self.config)

        elif setting_name== "backend":
            print("New backend selected")
@@ -415,13 +302,13 @@ class Gpt4AllWebUI(GPT4AllAPI):
            print("New backend selected")
            self.config["backend"]=data['setting_value']

            backend_ =self.load_backend(self.BACKENDS_LIST[self.config["backend"]])
            backend_, model_ =self.process.rebuild_model(self.config)
            models = backend_.list_models(self.config)
            if len(models)>0:
                self.backend = backend_
                self.config['model'] = models[0]
                # Build chatbot
                self.chatbot_bindings = self.create_chatbot()
                self.process.set_config(self.config)
                if self.config["debug"]:
                    print(f"Configuration {data['setting_name']} set to {data['setting_value']}")
                return jsonify({'setting_name': data['setting_name'], "status":True})
@@ -441,6 +328,9 @@ class Gpt4AllWebUI(GPT4AllAPI):

        if self.config["debug"]:
            print(f"Configuration {data['setting_name']} set to {data['setting_value']}")

        print("Configuration updated")
        self.process.set_config(self.config)
        # Tell that the setting was changed
        return jsonify({'setting_name': data['setting_name'], "status":True})

@@ -549,69 +439,15 @@ class Gpt4AllWebUI(GPT4AllAPI):
        return jsonify({"discussion_text":self.get_discussion_to()})


    def start_message_generation(self, message, message_id):
        bot_says = ""

        # send the message to the bot
        print(f"Received message : {message}")
        if self.current_discussion:
            # First we need to send the new message ID to the client
            self.current_ai_message_id = self.current_discussion.add_message(
                self.personality.name, "", parent = self.current_user_message_id
            ) # first the content is empty, but we'll fill it at the end
            socketio.emit('infos',
                    {
                        "type": "input_message_infos",
                        "bot": self.personality.name,
                        "user": self.personality.user_name,
                        "message":message,#markdown.markdown(message),
                        "user_message_id": self.current_user_message_id,
                        "ai_message_id": self.current_ai_message_id,
                    }
            )


            # prepare query and reception
            self.discussion_messages = self.prepare_query(message_id)
            self.prepare_reception()
            self.generating = True
            # app.config['executor'] = ThreadPoolExecutor(max_workers=1)
            # app.config['executor'].submit(self.generate_message)
            print("## Generating message ##")
            self.generate_message()

            print()
            print("## Done ##")
            print()

            # Send final message
            self.socketio.emit('final', {
                'data': self.bot_says,
                'ai_message_id':self.current_ai_message_id,
                'parent':self.current_user_message_id, 'discussion_id':self.current_discussion.discussion_id
                }
            )


            self.current_discussion.update_message(self.current_ai_message_id, self.bot_says)
            self.full_message_list.append(self.bot_says)
            self.cancel_gen = False
            return bot_says
        else:
            #No discussion available
            print("No discussion selected!!!")
            print("## Done ##")
            print()
            self.cancel_gen = False
            return ""


    def get_generation_status(self):
        return jsonify({"status":self.generating})
        return jsonify({"status":self.process.is_generating.value==1})

    def stop_gen(self):
        self.cancel_gen = True
        print("Stop generation received")
        return jsonify({"status": "ok"})
        return jsonify({"status": "ok"})


    def rename(self):
        data = request.get_json()
@@ -680,9 +516,6 @@ class Gpt4AllWebUI(GPT4AllAPI):
    def new_discussion(self):
        title = request.args.get("title")
        timestamp = self.create_new_discussion(title)
        # app.config['executor'] = ThreadPoolExecutor(max_workers=1)
        # app.config['executor'].submit(self.create_chatbot)
        # target=self.create_chatbot()

        # Return a success response
        return json.dumps({"id": self.current_discussion.discussion_id, "time": timestamp, "welcome_message":self.personality.welcome_message, "sender":self.personality.name})
@@ -700,7 +533,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
            self.backend = backend_
            self.config['model'] = models[0]
            # Build chatbot
            self.chatbot_bindings = self.create_chatbot()
            self.process.set_config(self.config)
            return jsonify({"status": "ok"})
        else:
            return jsonify({"status": "no_models_found"})
@@ -711,10 +544,10 @@ class Gpt4AllWebUI(GPT4AllAPI):
        data = request.get_json()
        model = str(data["model"])
        if self.config['model']!= model:
            print("New model selected")
            print("set_model: New model selected")
            self.config['model'] = model
            # Build chatbot
            self.chatbot_bindings = self.create_chatbot()
            self.process.set_config(self.config)
            return jsonify({"status": "ok"})

        return jsonify({"status": "error"})
@@ -728,11 +561,11 @@ class Gpt4AllWebUI(GPT4AllAPI):
        personality = str(data["personality"])

        if self.config['backend']!=backend or self.config['model'] != model:
            print("New model selected")
            print("update_model_params: New model selected")

            self.config['backend'] = backend
            self.config['model'] = model
            self.create_chatbot()
            self.process.set_config(self.config)

        self.config['personality_language'] = personality_language
        self.config['personality_category'] = personality_category
@@ -740,7 +573,6 @@ class Gpt4AllWebUI(GPT4AllAPI):

        personality_fn = f"personalities/{self.config['personality_language']}/{self.config['personality_category']}/{self.config['personality']}"
        print(f"Loading personality : {personality_fn}")
        self.personality = AIPersonality(personality_fn)

        self.config['n_predict'] = int(data["nPredict"])
        self.config['seed'] = int(data["seed"])
@@ -755,6 +587,9 @@ class Gpt4AllWebUI(GPT4AllAPI):
        self.config['repeat_last_n'] = int(data["repeatLastN"])

        save_config(self.config, self.config_file_path)

        self.process.set_config(self.config)
        self.backend, self.model = self.process.rebuild_model()

        print("==============================================")
        print("Parameters changed to:")
@@ -789,8 +624,12 @@ class Gpt4AllWebUI(GPT4AllAPI):
        models = []
        for model in model_list:
            filename = model['filename']
            server = model['server']
            filesize = model['filesize']
            path = f'https://gpt4all.io/models/{filename}'
            if server.endswith("/"):
                path = f'{server}{filename}'
            else:
                path = f'{server}/{filename}'
            local_path = Path(f'./models/{self.config["backend"]}/{filename}')
            is_installed = local_path.exists()
            models.append({
@@ -920,17 +759,9 @@ if __name__ == "__main__":
        if arg_value is not None:
            config[arg_name] = arg_value

    try:
        personality_path = f"personalities/{config['personality_language']}/{config['personality_category']}/{config['personality']}"
        personality = AIPersonality(personality_path)
    except Exception as ex:
        print("Personality file not found. Please verify that the personality you have selected exists or select another personality. Some updates may lead to change in personality name or category, so check the personality selection in settings to be sure.")
        if config["debug"]:
            print(ex)
        personality = AIPersonality()
    # executor = ThreadPoolExecutor(max_workers=1)
    # app.config['executor'] = executor
    bot = Gpt4AllWebUI(app, socketio, config, personality, config_file_path)
    bot = Gpt4AllWebUI(app, socketio, config, config_file_path)

    # chong Define custom WebSocketHandler with error handling
    class CustomWebSocketHandler(WebSocketHandler):
@@ -953,7 +784,3 @@ if __name__ == "__main__":

    http_server = WSGIServer((config["host"], config["port"]), app, handler_class=WebSocketHandler)
    http_server.serve_forever()
    #if config["debug"]:
    #    app.run(debug=True, host=config["host"], port=config["port"])
    #else:
    #    app.run(host=config["host"], port=config["port"])
pyGpt4All/api.py (458 lines changed)
@@ -14,18 +14,253 @@ from pyGpt4All.db import DiscussionsDB
from pathlib import Path
import importlib
from pyaipersonality import AIPersonality
import multiprocessing as mp
import threading
import time
import requests
import urllib.request

__author__ = "parisneo"
__github__ = "https://github.com/nomic-ai/gpt4all-ui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"

class GPT4AllAPI():
    def __init__(self, config:dict, personality:AIPersonality, config_file_path:str) -> None:


class ModelProcess:
    def __init__(self, config=None):
        self.config = config
        self.personality = personality
        self.generate_queue = mp.Queue()
        self.generation_queue = mp.Queue()
        self.cancel_queue = mp.Queue(maxsize=1)
        self.clear_queue_queue = mp.Queue(maxsize=1)
        self.set_config_queue = mp.Queue(maxsize=1)
        self.started_queue = mp.Queue()
        self.process = None
        self.is_generating = mp.Value('i', 0)
        self.ready = False

    def load_backend(self, backend_path):

        # define the full absolute path to the module
        absolute_path = backend_path.resolve()

        # infer the module name from the file path
        module_name = backend_path.stem

        # use importlib to load the module from the file path
        loader = importlib.machinery.SourceFileLoader(module_name, str(absolute_path/"__init__.py"))
        backend_module = loader.load_module()
        backend_class = getattr(backend_module, backend_module.backend_name)
        return backend_class

    def start(self):
        if self.process is None:
            self.process = mp.Process(target=self._run)
            self.process.start()

    def stop(self):
        if self.process is not None:
            self.generate_queue.put(None)
            self.process.join()
            self.process = None

    def set_backend(self, backend_path):
        self.backend = backend_path

    def set_model(self, model_path):
        self.model = model_path

    def set_config(self, config):
        self.set_config_queue.put(config)


    def generate(self, prompt, id, n_predict):
        self.generate_queue.put((prompt, id, n_predict))

    def cancel_generation(self):
        self.cancel_queue.put(('cancel',))

    def clear_queue(self):
        self.clear_queue_queue.put(('clear_queue',))

    def rebuild_model(self, config):
        try:
            backend = self.load_backend(Path("backends")/config["backend"])
            print("Backend loaded successfully")
            try:
                model = backend(config)
                print("Model created successfully")
            except Exception as ex:
                print("Couldn't build model")
                print(ex)
                model = None
        except Exception as ex:
            print("Couldn't build backend")
            print(ex)
            backend = None
            model = None
        return backend, model

    def _rebuild_model(self):
        try:
            self.backend = self.load_backend(Path("backends")/self.config["backend"])
            print("Backend loaded successfully")
            try:
                model_file = Path("models")/self.config["backend"]/self.config["model"]
                print(f"Loading model : {model_file}")
                self.model = self.backend(self.config)
                print("Model created successfully")
            except Exception as ex:
                print("Couldn't build model")
                print(ex)
                self.model = None
        except Exception as ex:
            print("Couldn't build backend")
            print(ex)
            self.backend = None
            self.model = None

    def rebuild_personality(self):
        try:
            personality_path = f"personalities/{self.config['personality_language']}/{self.config['personality_category']}/{self.config['personality']}"
            personality = AIPersonality(personality_path)
        except Exception as ex:
            print("Personality file not found. Please verify that the personality you have selected exists or select another personality. Some updates may lead to change in personality name or category, so check the personality selection in settings to be sure.")
            if self.config["debug"]:
                print(ex)
            personality = AIPersonality()
        return personality

    def _rebuild_personality(self):
        try:
            personality_path = f"personalities/{self.config['personality_language']}/{self.config['personality_category']}/{self.config['personality']}"
            self.personality = AIPersonality(personality_path)
        except Exception as ex:
            print("Personality file not found. Please verify that the personality you have selected exists or select another personality. Some updates may lead to change in personality name or category, so check the personality selection in settings to be sure.")
            if self.config["debug"]:
                print(ex)
            self.personality = AIPersonality()

    def _run(self):
        self._rebuild_model()
        self._rebuild_personality()
        self._generate("I",0,1)
        print()
        print("Ready to receive data")
        print(f"Listening on :http://{self.config['host']}:{self.config['port']}")
        self.ready = True

        while True:
            self._check_cancel_queue()
            self._check_clear_queue()

            command = self.generate_queue.get()
            if command is None:
                break

            if self.cancel_queue.empty() and self.clear_queue_queue.empty():
                self.is_generating.value = 1
                self.started_queue.put(1)
                self._generate(*command)
                while not self.generation_queue.empty():
                    time.sleep(1)
                self.is_generating.value = 0

    def _generate(self, prompt, id, n_predict):
        self.id = id
        if self.config["override_personality_model_parameters"]:
            self.model.generate(
                prompt,
                new_text_callback=self._callback,
                n_predict=n_predict,
                temp=self.config['temperature'],
                top_k=self.config['top_k'],
                top_p=self.config['top_p'],
                repeat_penalty=self.config['repeat_penalty'],
                repeat_last_n = self.config['repeat_last_n'],
                seed=self.config['seed'],
                n_threads=self.config['n_threads']
            )
        else:
            self.model.generate(
                prompt,
                new_text_callback=self._callback,
                n_predict=n_predict,
                temp=self.personality.model_temperature,
                top_k=self.personality.model_top_k,
                top_p=self.personality.model_top_p,
                repeat_penalty=self.personality.model_repeat_penalty,
                repeat_last_n = self.personality.model_repeat_last_n,
                #seed=self.config['seed'],
                n_threads=self.config['n_threads']
            )

    def _callback(self, text):
        if not self.ready:
            print(".",end="")
            sys.stdout.flush()
            return True
        else:
            # Stream the generated text to the main process
            self.generation_queue.put((text,self.id))
            self._check_cancel_queue()
            self._check_clear_queue()
            # if stop generation is detected then stop
            if self.is_generating.value==1:
                return True
            else:
                return False

    def _check_cancel_queue(self):
        while not self.cancel_queue.empty():
            command = self.cancel_queue.get()
            if command is not None:
                self._cancel_generation()

    def _check_clear_queue(self):
        while not self.clear_queue_queue.empty():
            command = self.clear_queue_queue.get()
            if command is not None:
                self._clear_queue()

    def _check_set_config_queue(self):
        while not self.set_config_queue.empty():
            config = self.set_config_queue.get()
            if config is not None:
                self._set_config(config)

    def _cancel_generation(self):
        self.is_generating.value = 0

    def _clear_queue(self):
        while not self.generate_queue.empty():
            self.generate_queue.get()

    def _set_config(self, config):
        bk_cfg = self.config
        self.config = config
        # verify that the backend is the same
        if self.config["backend"]!=bk_cfg["backend"] or self.config["model"]!=bk_cfg["model"]:
            self._rebuild_model()

        # verify that the personality is the same
        if self.config["personality"]!=bk_cfg["personality"] or self.config["personality_category"]!=bk_cfg["personality_category"] or self.config["personality_language"]!=bk_cfg["personality_language"]:
            self._rebuild_personality()


class GPT4AllAPI():
    def __init__(self, config:dict, socketio, config_file_path:str) -> None:
        self.socketio = socketio
        #Create and launch the process
        self.process = ModelProcess(config)
        self.process.start()
        self.config = config

        self.backend, self.model = self.process.rebuild_model(self.config)
        self.personality = self.process.rebuild_personality()
        if config["debug"]:
            print(print(f"{personality}"))
            print(print(f"{self.personality}"))
        self.config_file_path = config_file_path
        self.cancel_gen = False

@@ -45,26 +280,84 @@ class GPT4AllAPI():

        # This is used to keep track of messages
        self.full_message_list = []

        # =========================================================================================
        # Socket IO stuff
        # =========================================================================================
        @socketio.on('connect')
        def connect():
            print('Client connected')

        # Select backend
        self.BACKENDS_LIST = {f.stem:f for f in Path("backends").iterdir() if f.is_dir() and f.stem!="__pycache__"}
        @socketio.on('disconnect')
        def disconnect():
            print('Client disconnected')

        if self.config["backend"] is None:
            self.backend = "gpt4all"
            self.backend = self.load_backend(self.BACKENDS_LIST[self.config["backend"]])
        else:
            try:
                self.backend = self.load_backend(self.BACKENDS_LIST[self.config["backend"]])
                # Build chatbot
                self.chatbot_bindings = self.create_chatbot()
                print("Chatbot created successfully")
        @socketio.on('install_model')
        def install_model(data):
            def install_model_():
                print("Install model triggered")
                model_path = data["path"]
                progress = 0
                installation_dir = Path(f'./models/{self.config["backend"]}/')
                filename = Path(model_path).name
                installation_path = installation_dir / filename
                print("Model install requested")
                print(f"Model path : {model_path}")

            except Exception as ex:
                self.config["backend"] = "gpt4all"
                self.backend = self.load_backend(self.BACKENDS_LIST[self.config["backend"]])
                self.config["model"] = None
                print("No Models found, please select a backend and download a model for this tool to work")
                if installation_path.exists():
                    print("Error: Model already exists")
                    data.installing = False
                    socketio.emit('install_progress',{'status': 'failed', 'error': 'model already exists'})

                socketio.emit('install_progress',{'status': 'progress', 'progress': progress})

                def callback(progress):
                    socketio.emit('install_progress',{'status': 'progress', 'progress': progress})

                self.download_file(model_path, installation_path, callback)
                socketio.emit('install_progress',{'status': 'succeeded', 'error': ''})
            tpe = threading.Thread(target=install_model_, args=())
            tpe.start()

        @socketio.on('uninstall_model')
        def uninstall_model(data):
            model_path = data['path']
            installation_dir = Path(f'./models/{self.config["backend"]}/')
            filename = Path(model_path).name
            installation_path = installation_dir / filename

            if not installation_path.exists():
                socketio.emit('install_progress',{'status': 'failed', 'error': 'The model does not exist'})

            installation_path.unlink()
            socketio.emit('install_progress',{'status': 'succeeded', 'error': ''})



        @socketio.on('generate_msg')
        def generate_msg(data):
            if self.current_discussion is None:
                if self.db.does_last_discussion_have_messages():
                    self.current_discussion = self.db.create_discussion()
                else:
                    self.current_discussion = self.db.load_last_discussion()

            message = data["prompt"]
            message_id = self.current_discussion.add_message(
                "user", message, parent=self.message_id
            )

            self.current_user_message_id = message_id
            tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))
            tpe.start()

        @socketio.on('generate_msg_from')
        def handle_connection(data):
            message_id = int(data['id'])
            message = data["prompt"]
            self.current_user_message_id = message_id
            tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))
            tpe.start()
        # generation status
        self.generating=False

@@ -89,6 +382,24 @@ class GPT4AllAPI():
        self._message_id = id


    def download_file(self, url, installation_path, callback=None):
        """
        Downloads a file from a URL and displays the download progress using tqdm.

        Args:
            url (str): The URL of the file to download.
            callback (function, optional): A callback function to be called during the download
                with the progress percentage as an argument. Defaults to None.
        """
        def report_hook(count, block_size, total_size):
            if callback is not None:
                percentage = (count * block_size / total_size) * 100
                callback(percentage)

        urllib.request.urlretrieve(url, installation_path, reporthook=report_hook)

        if callback is not None:
            callback(100.0)

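For context, urllib.request.urlretrieve invokes its reporthook with (block_count, block_size, total_size), which is where the percentage in download_file above comes from; note that block_count * block_size can overshoot total_size on the final block, so clamping is reasonable. A minimal standalone sketch of that contract (the URL and destination below are placeholders, not endpoints from this project):

import urllib.request

def report_hook(count, block_size, total_size):
    # urlretrieve passes: blocks transferred so far, block size, total size.
    if total_size > 0:  # total_size is -1 when the server omits Content-Length
        percentage = min(count * block_size / total_size * 100, 100.0)
        print(f"\rDownloaded {percentage:.1f}%", end="")

urllib.request.urlretrieve(
    "https://example.com/model.bin",  # placeholder URL
    "model.bin",                      # placeholder destination
    reporthook=report_hook,
)
print()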
    def load_backend(self, backend_path):

@@ -104,10 +415,8 @@ class GPT4AllAPI():
        backend_class = getattr(backend_module, backend_module.backend_name)
        return backend_class

    def create_chatbot(self):
        return self.backend(self.config)

    def condition_chatbot(self, conditionning_message):
    def condition_chatbot(self):
        if self.current_discussion is None:
            self.current_discussion = self.db.load_last_discussion()

@@ -196,14 +505,10 @@ class GPT4AllAPI():

        return string


    def new_text_callback(self, text: str):
        if self.cancel_gen:
            return False
        print(text, end="")
    def process_chunk(self, chunk):
        print(chunk[0],end="")
        sys.stdout.flush()

        self.bot_says += text
        self.bot_says += chunk[0]
        if not self.personality.detect_antiprompt(self.bot_says):
            self.socketio.emit('message', {
                'data': self.bot_says,
@@ -214,45 +519,66 @@ class GPT4AllAPI():
            )
            if self.cancel_gen:
                print("Generation canceled")
                self.process.cancel_generation()
                self.cancel_gen = False
                return False
            else:
                return True
        else:
            self.bot_says = self.remove_text_from_string(self.bot_says, self.personality.user_message_prefix.strip())
            self.process.cancel_generation()
            print("The model is halucinating")
            return False

    def generate_message(self):
        self.generating=True
        gc.collect()
        total_n_predict = self.config['n_predict']
        print(f"Generating {total_n_predict} outputs... ")
        print(f"Input text :\n{self.discussion_messages}")
        if self.config["override_personality_model_parameters"]:
            self.chatbot_bindings.generate(
                self.discussion_messages,
                new_text_callback=self.new_text_callback,
                n_predict=total_n_predict,
                temp=self.config['temperature'],
                top_k=self.config['top_k'],
                top_p=self.config['top_p'],
                repeat_penalty=self.config['repeat_penalty'],
                repeat_last_n = self.config['repeat_last_n'],
                seed=self.config['seed'],
                n_threads=self.config['n_threads']

    def start_message_generation(self, message, message_id):
        bot_says = ""

        # send the message to the bot
        print(f"Received message : {message}")
        if self.current_discussion:
            # First we need to send the new message ID to the client
            self.current_ai_message_id = self.current_discussion.add_message(
                self.personality.name, "", parent = self.current_user_message_id
            ) # first the content is empty, but we'll fill it at the end
            self.socketio.emit('infos',
                    {
                        "type": "input_message_infos",
                        "bot": self.personality.name,
                        "user": self.personality.user_name,
                        "message":message,#markdown.markdown(message),
                        "user_message_id": self.current_user_message_id,
                        "ai_message_id": self.current_ai_message_id,
                    }
            )

            # prepare query and reception
            self.discussion_messages = self.prepare_query(message_id)
            self.prepare_reception()
            self.generating = True
            print("## Generating message ##")
            self.process.generate(self.discussion_messages, message_id, n_predict = self.config['n_predict'])
            self.process.started_queue.get()
            while(self.process.is_generating.value): # Simulating other commands being issued
                while not self.process.generation_queue.empty():
                    self.process_chunk(self.process.generation_queue.get())

            print()
            print("## Done ##")
            print()

            # Send final message
            self.socketio.emit('final', {
                'data': self.bot_says,
                'ai_message_id':self.current_ai_message_id,
                'parent':self.current_user_message_id, 'discussion_id':self.current_discussion.discussion_id
                }
            )

            self.current_discussion.update_message(self.current_ai_message_id, self.bot_says)
            self.full_message_list.append(self.bot_says)
            self.cancel_gen = False
            return bot_says
        else:
            self.chatbot_bindings.generate(
                self.discussion_messages,
                new_text_callback=self.new_text_callback,
                n_predict=total_n_predict,
                temp=self.personality.model_temperature,
                top_k=self.personality.model_top_k,
                top_p=self.personality.model_top_p,
                repeat_penalty=self.personality.model_repeat_penalty,
                repeat_last_n = self.personality.model_repeat_last_n,
                #seed=self.config['seed'],
                n_threads=self.config['n_threads']
            )
            self.generating=False
            #No discussion available
            print("No discussion selected!!!")
            print("## Done ##")
            print()
            self.cancel_gen = False
            return ""
@@ -14,7 +14,7 @@ var globals={
    waitAnimation:undefined
}

var socket = io.connect(location.protocol + '//' + document.domain + ':' + location.port);
var socket = io.connect('http://' + document.domain + ':' + location.port);

socket.on('connect', function() {
});
@@ -63,4 +63,4 @@ function send_message(service_name, parameters){
        globals.socket.emit(service_name, parameters);
        globals.is_generating = true
    }
}
}
web/dist/assets/index-2610ee40.js (vendored, new file, 36 lines)
File diff suppressed because one or more lines are too long
web/dist/assets/index-98afc6c1.css (vendored, new file, 1 line)
File diff suppressed because one or more lines are too long
web/dist/index.html (vendored, 4 lines changed)
@@ -6,8 +6,8 @@

    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>GPT4All - WEBUI</title>
    <script type="module" crossorigin src="/assets/index-a948d86d.js"></script>
    <link rel="stylesheet" href="/assets/index-1c281352.css">
    <script type="module" crossorigin src="/assets/index-2610ee40.js"></script>
    <link rel="stylesheet" href="/assets/index-98afc6c1.css">
  </head>
  <body>
    <div id="app"></div>
@@ -13,6 +13,7 @@
                />
                {{ title }}
            </h3>
            <a :href="path">{{ title }}</a>
            <p class="opacity-80">{{ description }}</p>
        </div>
        <div class="flex-shrink-0">
@@ -24,13 +25,17 @@
        >
            <template v-if="installing">
                <div class="flex items-center space-x-2">
                    <div class="h-2 w-20 bg-gray-300 rounded"></div>
                    <div class="h-2 w-20 bg-gray-300 rounded">
                        <div :style="{ width: progress + '%' }" class="h-full bg-green-500"></div>
                    </div>
                    <span>Installing...</span>
                </div>
            </template>
            <template v-else-if="uninstalling">
                <div class="flex items-center space-x-2">
                    <div class="h-2 w-20 bg-gray-300 rounded"></div>
                    <div class="h-2 w-20 bg-gray-300 rounded">
                        <div :style="{ width: progress + '%' }" class="h-full bg-green-500"></div>
                    </div>
                    <span>Uninstalling...</span>
                </div>
            </template>
@@ -43,9 +48,13 @@
</template>

<script>
import { socket, state } from '@/services/websocket.js'
import socket from '@/services/websocket.js'
export default {
    props: {
        progress: {
            type: Number,
            default: 0
        },
        title: String,
        icon: String,
        path: String,
@@ -76,8 +85,8 @@ export default {
    },
    handleSelection() {
        if (this.isInstalled && !this.selected) {
            this.onSelected(this);
            this.selected=true;
            onSelected(this);
        }
    }
}
@@ -3,12 +3,9 @@
// Description :
// All websocket stuff can be found here.
// More info can be found here https://socket.io/how-to/use-with-vue
import io from 'socket.io-client';
import { reactive } from "vue";
import { createApp } from 'vue';
import io from 'socket.io-client';

const state = reactive({
    connected: false,
});

const socket = new io(import.meta.env.VITE_GPT4ALL_API );

@@ -26,14 +23,18 @@ socket.onerror = (error) => {
};

socket.on("connect", () => {
    state.connected = true;
    console.log('WebSocket connected (websocket)');
});

socket.on("disconnect", () => {
    state.connected = false;
    console.log('WebSocket disonnected (websocket)');

});

export {socket, state};
const app = createApp(/* your root component */);

app.config.globalProperties.$socket = socket;

app.mount(/* your root element */);

export default socket;
@@ -148,6 +148,7 @@
</style>

<script>

export default {
    setup() { },
    data() {
@@ -831,7 +832,7 @@ export default {
    },
    computed: {
        socketConnected() {
            return state.connected
            return true
        },
        selectedDiscussions() {
            nextTick(() => {
@@ -856,7 +857,7 @@ import feather from 'feather-icons'
import axios from 'axios'
import { nextTick } from 'vue'

import { socket, state } from '@/services/websocket.js'
import socket from '@/services/websocket.js'

import { onMounted } from 'vue'
import { initFlowbite } from 'flowbite'
@@ -171,7 +171,7 @@
                </div>

                <input id="temperature" @change="update_setting('temperature', $event.target.value)" type="range"
                    v-model="configFile.temp" min="0" max="5" step="0.1"
                    v-model="configFile.temperature" min="0" max="5" step="0.1"
                    class="flex-none h-2 mt-14 mb-2 w-full bg-gray-200 rounded-lg appearance-none cursor-pointer dark:bg-gray-700 focus:ring-blue-500 focus:border-blue-500 dark:border-gray-600 dark:placeholder-gray-400 dark:focus:ring-blue-500 dark:focus:border-blue-500">
            </div>
        </div>
@@ -297,7 +297,7 @@ import { nextTick } from 'vue'
import MessageBox from "@/components/MessageBox.vue";
import YesNoDialog from "@/components/YesNoDialog.vue";
import ModelEntry from '@/components/ModelEntry.vue';
import { socket, state } from '@/services/websocket.js'
import socket from '@/services/websocket.js'
axios.defaults.baseURL = import.meta.env.VITE_GPT4ALL_API_BASEURL
export default {
    components: {
@@ -315,8 +315,6 @@ export default {
    data() {

        return {
            // Websocket
            socket: socket,
            // Models zoo installer stuff
            models: [],
            // Accordeon stuff
@@ -348,40 +346,40 @@ export default {
        },
        onSelected(model_object){
            console.log("Selected model")
            update_setting('model', model_object.title)
            this.update_setting('model', model_object.title, (res)=>{console.log("Model selected"); })
        },
        // Model installation
        onInstall(model_object) {
            let isInstalled = model_object.isInstalled
            let path = model_object.path
            let path = model_object.path;
            this.showProgress = true;
            this.progress = 0;
            console.log("installing...")
            console.log("installing...");

            // Use an arrow function for progressListener
            const progressListener = (response) => {
                console.log("received something");
                if (response.status === 'progress') {
                    this.progress = message.progress;
                    console.log(`Progress = ${response.progress}`);
                    this.progress = response.progress;
                } else if (response.status === 'succeeded') {
                    // Installation completed
                    model_object.installing = false;
                    this.showProgress = false;
                    // Update the isInstalled property of the corresponding model
                    const index = this.models.findIndex((model) => model.path === path);
                    this.models[index].isInstalled = true;
                    socket.off('install_progress', progressListener);
                    // Update the isInstalled property of the corresponding model
                    const index = this.models.findIndex((model) => model.path === path);
                    this.models[index].isInstalled = true;
                    this.showProgress = false;

                    this.socket.off('install_progress', progressListener);
                } else if (response.status === 'failed') {
                    // Installation failed or encountered an error
                    model_object.installing = false;
                    this.showProgress = false;
                    this.socket.off('install_progress', progressListener);
                    console.error('Installation failed:', message.error);
                    socket.off('install_progress', progressListener);
                    // Installation failed or encountered an error
                    model_object.installing = false;
                    this.showProgress = false;
                    console.error('Installation failed:', message.error);
                }
            };
            this.socket.on('install_progress', progressListener);
            this.socket.emit('install_model', { path: path });


            socket.on('install_progress', progressListener);
            socket.emit('install_model', { path: path });
            console.log("Started installation, please wait");
        },
        onUninstall(model_object) {
            console.log("uninstalling model...")
@@ -389,24 +387,23 @@ export default {
                if (response.status === 'progress') {
                    this.progress = message.progress;
                } else if (response.status === 'succeeded') {
                    console.log(model_object)
                    // Installation completed
                    model_object.uninstalling = false;

                    socket.off('install_progress', progressListener);
                    this.showProgress = false;
                    // Update the isInstalled property of the corresponding model
                    model_object.isInstalled = false;

                    this.socket.off('install_progress', progressListener);
                    const index = this.models.findIndex((model) => model.path === model_object.path);
                    this.models[index].isInstalled = false;
                } else if (response.status === 'failed') {
                    // Installation failed or encountered an error
                    model_object.uninstalling = false;
                    this.showProgress = false;
                    this.socket.off('install_progress', progressListener);
                    socket.off('install_progress', progressListener);
                    console.error('Installation failed:', message.error);
                }
            };
            this.socket.on('install_progress', progressListener);
            this.socket.emit('uninstall_model', { path: model_object.path });
            socket.on('install_progress', progressListener);
            socket.emit('uninstall_model', { path: model_object.path });
        },
        // messagebox ok stuff
        onMessageBoxOk() {
@@ -426,7 +423,7 @@ export default {
            this.api_get_req("get_config").then(response => {
                this.configFile = response
                console.log("selecting model")
                self.models.forEach(model => {
                this.models.forEach(model => {
                    console.log(`${model} -> ${response["model"]}`)
                    if(model.title==response["model"]){
                        model.selected=true;
@@ -459,7 +456,7 @@ export default {
        },
        update_backend(value) {
            console.log("Upgrading backend")
            res = this.update_setting('backend', value, (res)=>{console.log("Backend changed"); })
            this.update_setting('backend', value, (res)=>{console.log("Backend changed"); this.fetchModels(); })
        },
        save_configuration() {
            this.showConfirmation = false