Mirror of https://github.com/ParisNeo/lollms-webui.git (synced 2024-12-22 05:37:48 +00:00)
Merge branch 'lollms' into lollms-patchy2

Commit 14c13ab062
@@ -136,7 +136,7 @@ class ModelProcess:
         return string

-    def load_binding(self, binding_name:str, install=False):
+    def load_binding(self, binding_name:str, install=False, force_install=False):
         if install:
             print(f"Loading binding {binding_name} install ON")
         else:
@@ -151,7 +151,7 @@ class ModelProcess:
             # first find out if there is a requirements.txt file
             install_file_name="install.py"
             install_script_path = binding_path / install_file_name
-            if install_script_path.exists():
+            if install_script_path.exists() or force_install:
                 module_name = install_file_name[:-3] # Remove the ".py" extension
                 module_spec = importlib.util.spec_from_file_location(module_name, str(install_script_path))
                 module = importlib.util.module_from_spec(module_spec)
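For context, the hunks above rely on Python's importlib machinery to run a binding's install.py on demand, now optionally forced via the new force_install flag. Below is a minimal, hypothetical sketch of that pattern; only binding_path, install.py, and the force_install condition come from the diff, the helper itself is illustrative and not the lollms-webui implementation:

# Minimal sketch (assumption, not the actual lollms-webui code) of loading a
# binding's install.py as a module and executing it, honoring force_install.
import importlib.util
from pathlib import Path

def run_install_script(binding_path: Path, force_install: bool = False) -> None:
    install_file_name = "install.py"
    install_script_path = binding_path / install_file_name
    if install_script_path.exists() or force_install:
        # Note: if force_install is set but the script is missing, loading will fail,
        # mirroring the pattern shown in the diff.
        module_name = install_file_name[:-3]  # strip the ".py" extension
        spec = importlib.util.spec_from_file_location(module_name, str(install_script_path))
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # top-level code in install.py runs here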
@@ -837,7 +837,9 @@ class LoLLMsAPPI():
             if self.current_discussion:
                 # First we need to send the new message ID to the client
                 self.current_ai_message_id = self.current_discussion.add_message(
-                    self.personality.name, "", parent = self.current_user_message_id
+                    self.personality.name,
+                    "",
+                    parent = self.current_user_message_id
                 ) # first the content is empty, but we'll fill it at the end
                 self.socketio.emit('infos',
                         {
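The surrounding code follows a pattern the comments spell out: the AI message row is created with empty content so its id can be pushed to the client immediately, and the content is filled in once generation finishes. A rough, hypothetical sketch of that flow (add_message, the parent argument, and the 'infos' event name come from the diff; the function and the payload keys are assumptions):

# Hypothetical sketch of the "create empty message first, fill it later" flow.
def start_ai_reply(socketio, discussion, personality_name, user_message_id):
    # Create the AI message with empty content so the client gets its id right away.
    ai_message_id = discussion.add_message(
        personality_name,
        "",
        parent=user_message_id
    )
    # Tell the client which message row the generated tokens will land in
    # (payload keys here are illustrative, not the real 'infos' schema).
    socketio.emit('infos', {"ai_message_id": ai_message_id,
                            "user_message_id": user_message_id})
    return ai_message_id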
@@ -365,7 +365,7 @@ class Discussion:
            new_content (str): The nex message content
        """
        self.discussions_db.update(
-            f"UPDATE message SET content = ? WHERE id = ?",(new_content,message_id)
+            f"UPDATE message SET content = ?, finished_generating_at = ? WHERE id = ?",(new_content, datetime.now().strftime('%Y-%m-%d %H:%M:%S'),message_id)
        )

    def message_rank_up(self, message_id):
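The change above stamps finished_generating_at at the moment a message's content is finalized. A standalone sketch of the same idea against plain sqlite3 (the message table and its column names are taken from the diff; the helper and the db_path parameter are hypothetical, not the lollms-webui DiscussionsDB class):

# Standalone sketch of updating a message's content together with its
# finished_generating_at timestamp, using the same strftime format as the diff.
import sqlite3
from datetime import datetime

def update_message(db_path: str, message_id: int, new_content: str) -> None:
    finished_at = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    with sqlite3.connect(db_path) as conn:
        conn.execute(
            "UPDATE message SET content = ?, finished_generating_at = ? WHERE id = ?",
            (new_content, finished_at, message_id),
        )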
app.py (20 changed lines)
@@ -84,6 +84,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
         # =========================================================================================


+        self.add_endpoint("/reinstall_binding", "reinstall_binding", self.reinstall_binding, methods=["POST"])


         self.add_endpoint("/switch_personal_path", "switch_personal_path", self.switch_personal_path, methods=["POST"])
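add_endpoint here registers the new /reinstall_binding route on the underlying Flask app. The real helper's signature isn't shown in this commit, but such wrappers are typically a thin layer over Flask's add_url_rule; a hedged sketch, under that assumption:

# Hypothetical sketch of an add_endpoint helper built on Flask's add_url_rule;
# the actual lollms-webui wrapper may have a different signature.
from flask import Flask

class EndpointHost:
    def __init__(self):
        self.app = Flask(__name__)

    def add_endpoint(self, endpoint, endpoint_name, handler, methods=("GET",)):
        # Flask maps the URL rule to the bound method acting as the view function.
        self.app.add_url_rule(endpoint, endpoint_name, handler, methods=list(methods))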
@@ -738,6 +739,24 @@ class LoLLMsWebUI(LoLLMsAPPI):
             "active_personality_id":self.config["active_personality_id"]
             })

+    def reinstall_binding(self):
+        try:
+            data = request.get_json()
+            # Further processing of the data
+        except Exception as e:
+            print(f"Error occurred while parsing JSON: {e}")
+            return
+        print(f"- Reinstalling binding {data['name']}...",end="")
+        try:
+            self.binding = self.process.load_binding(self.config["binding_name"], install=True, force_install=True)
+            return jsonify({"status": True})
+        except Exception as ex:
+            print(f"Couldn't build binding: [{ex}]")
+            return jsonify({"status":False, 'error':str(ex)})
+
+
+
+
     def mount_personality(self):
         print("- Mounting personality ...",end="")
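For reference, the new handler expects a JSON body with a name field and replies with a status flag (plus an error string on failure). A hedged client-side example using requests; the port and the binding name in the payload are assumptions, not values taken from this commit:

# Hypothetical client call against the new /reinstall_binding endpoint.
import requests

response = requests.post(
    "http://localhost:9600/reinstall_binding",  # port is an assumption
    json={"name": "llama_cpp_official"},
)
print(response.json())  # e.g. {"status": true} or {"status": false, "error": "..."}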
@@ -813,6 +832,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
             self.personality = self.mounted_personalities[self.config["active_personality_id"]]
             self.apply_settings()
+            ASCIIColors.success("ok")
             print(f"Mounted {self.personality.name}")
             return jsonify({
                             "status": True,
                             "personalities":self.config["personalities"],
@@ -1,7 +1,7 @@
 # =================== Lord Of Large Language Models Configuration file ===========================
 version: 7
-binding_name: c_transformers
-model_name: ggml-gpt4all-j-v1.3-groovy.bin
+binding_name: llama_cpp_official
+model_name: null

 # Host information
 host: localhost
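The default configuration now points at the llama_cpp_official binding with no preselected model; once the YAML is parsed, model_name: null becomes Python None. A small sketch of reading these keys with PyYAML (the config file path is an assumption, not shown in this commit):

# Sketch of reading the keys changed in this hunk with PyYAML.
import yaml

with open("configs/config.yaml", "r", encoding="utf-8") as f:  # path is an assumption
    config = yaml.safe_load(f)

binding_name = config["binding_name"]   # "llama_cpp_official"
model_name = config["model_name"]       # None until a model is selected
print(binding_name, model_name)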