Merge pull request #275 from ParisNeo/lollms

Lollms
Saifeddine ALOUI, 2023-06-12 16:21:17 +02:00, committed by GitHub
commit 11275a772a
11 changed files with 138 additions and 103 deletions


@@ -7,6 +7,7 @@
 # Description :
 # A simple api to communicate with lollms-webui and its models.
 ######
+from flask import request
 from datetime import datetime
 from api.db import DiscussionsDB
 from api.helpers import compare_lists
@@ -15,6 +16,7 @@ import importlib
 from lollms.personality import AIPersonality, MSG_TYPE
 from lollms.binding import LOLLMSConfig
 from lollms.paths import LollmsPaths
+from lollms.helpers import ASCIIColors
 import multiprocessing as mp
 import threading
 import time
@ -138,6 +140,11 @@ class ModelProcess:
print(f"Loading binding {binding_name} install ON")
else:
print(f"Loading binding : {binding_name} install is off")
if binding_name is None:
self.model
binding_path = self.lollms_paths.bindings_zoo_path/binding_name
if install:
# first find out if there is a requirements.txt file
@@ -174,12 +181,6 @@ class ModelProcess:
         self.process.join()
         self.process = None

-    def set_binding(self, binding_path):
-        self.binding = binding_path
-
-    def set_model(self, model_path):
-        self.model = model_path
-
     def set_config(self, config):
         try:
             self.set_config_result_queue.get_nowait()
@@ -199,28 +200,30 @@ class ModelProcess:
     def cancel_generation(self):
         self.completion_signal.set()
         self.cancel_queue.put(('cancel',))
-        print("Cancel request received")
+        ASCIIColors.error("Cancel request received")

     def clear_queue(self):
         self.clear_queue_queue.put(('clear_queue',))

     def rebuild_binding(self, config):
         try:
-            print(" ******************* Building Binding from main Process *************************")
+            ASCIIColors.success(" ******************* Building Binding from main Process *************************")
             binding = self.load_binding(config["binding_name"], install=True)
-            print("Binding loaded successfully")
+            ASCIIColors.success("Binding loaded successfully")
         except Exception as ex:
-            print("Couldn't build binding.")
-            print(ex)
+            ASCIIColors.error("Couldn't build binding.")
+            ASCIIColors.error("-----------------")
+            print(f"It seems that there is no valid binding selected. Please use the ui settings to select a binding.\nHere is the encountered error: {ex}")
+            ASCIIColors.error("-----------------")
             binding = None
         return binding

     def _rebuild_model(self):
         try:
             self.reset_config_result()
-            print(" ******************* Building Binding from generation Process *************************")
+            ASCIIColors.success(" ******************* Building Binding from generation Process *************************")
             self.binding = self.load_binding(self.config["binding_name"], install=True)
-            print("Binding loaded successfully")
+            ASCIIColors.success("Binding loaded successfully")
             try:
                 model_file = self.lollms_paths.personal_models_path/self.config["binding_name"]/self.config["model_name"]
                 print(f"Loading model : {model_file}")
@@ -234,11 +237,11 @@ class ModelProcess:
                 print(f"Couldn't build model {self.config['model_name']} : {ex}")
                 self.model = None
                 self._set_config_result['status'] ='failed'
-                self._set_config_result['binding_status'] ='failed'
-                self._set_config_result['errors'].append(f"couldn't build binding:{ex}")
+                self._set_config_result['model_status'] ='failed'
+                self._set_config_result['errors'].append(f"couldn't build model:{ex}")
         except Exception as ex:
             traceback.print_exc()
-            print("Couldn't build binding")
+            print("Couldn't build model")
             print(ex)
             self.binding = None
             self.model = None
@@ -248,7 +251,7 @@ class ModelProcess:
     def rebuild_personalities(self):
         mounted_personalities=[]
-        print(f" ******************* Building mounted Personalities from main Process *************************")
+        ASCIIColors.success(f" ******************* Building mounted Personalities from main Process *************************")
         for personality in self.config['personalities']:
             try:
                 print(f" {personality}")
@@ -257,12 +260,12 @@ class ModelProcess:
                 personality = AIPersonality(self.lollms_paths, personality_path, run_scripts=False)
                 mounted_personalities.append(personality)
             except Exception as ex:
-                print(f"Personality file not found or is corrupted ({personality_path}).\nPlease verify that the personality you have selected exists or select another personality. Some updates may lead to change in personality name or category, so check the personality selection in settings to be sure.")
+                ASCIIColors.error(f"Personality file not found or is corrupted ({personality_path}).\nPlease verify that the personality you have selected exists or select another personality. Some updates may lead to change in personality name or category, so check the personality selection in settings to be sure.")
                 if self.config["debug"]:
                     print(ex)
                 personality = AIPersonality(self.lollms_paths)

-        print(f" ************ Personalities mounted (Main process) ***************************")
+        ASCIIColors.success(f" ************ Personalities mounted (Main process) ***************************")
         return mounted_personalities
@@ -270,22 +273,21 @@ class ModelProcess:
         self.mounted_personalities=[]
         failed_personalities=[]
         self.reset_config_result()
-        print(f" ******************* Building mounted Personalities from generation Process *************************")
+        ASCIIColors.success(f" ******************* Building mounted Personalities from generation Process *************************")
         for personality in self.config['personalities']:
             try:
                 print(f" {personality}")
                 personality_path = self.lollms_paths.personalities_zoo_path/f"{personality}"
-                personality = AIPersonality(self.lollms_paths, personality_path, run_scripts=True)
+                personality = AIPersonality(self.lollms_paths, personality_path, run_scripts=True, model=self.model)
                 self.mounted_personalities.append(personality)
             except Exception as ex:
-                print(f"Personality file not found or is corrupted ({personality_path}).\nPlease verify that the personality you have selected exists or select another personality. Some updates may lead to change in personality name or category, so check the personality selection in settings to be sure.")
-                if self.config["debug"]:
-                    print(ex)
-                personality = AIPersonality(self.lollms_paths)
+                ASCIIColors.error(f"Personality file not found or is corrupted ({personality_path}).\nPlease verify that the personality you have selected exists or select another personality. Some updates may lead to change in personality name or category, so check the personality selection in settings to be sure.")
+                ASCIIColors.error(f"Exception received is: {ex}")
+                personality = AIPersonality(self.lollms_paths, model=self.model)
                 failed_personalities.append(personality_path)
                 self._set_config_result['errors'].append(f"couldn't build personalities:{ex}")
-        print(f" ************ Personalities mounted (Generation process) ***************************")
+        ASCIIColors.success(f" ************ Personalities mounted (Generation process) ***************************")
         if len(failed_personalities)==len(self.config['personalities']):
             self._set_config_result['status'] ='failed'
             self._set_config_result['personalities_status'] ='failed'
@@ -293,9 +295,11 @@ class ModelProcess:
             self._set_config_result['status'] ='semi_failed'
             self._set_config_result['personalities_status'] ='semi_failed'

         if self.config['active_personality_id']<len(self.mounted_personalities):
             self.personality = self.mounted_personalities[self.config['active_personality_id']]
-            self.mounted_personalities = self.config["personalities"]
-            print("Personality set successfully")
+            ASCIIColors.success("Personality set successfully")
+        else:
+            ASCIIColors.error("Failed to set personality. Please select a valid one")

     def _run(self):
         self._rebuild_model()
@@ -320,7 +324,7 @@ class ModelProcess:
             print("No model loaded. Waiting for new configuration instructions")

         self.ready = True
-        print(f"Listening on :http://{self.config['host']}:{self.config['port']}")
+        ASCIIColors.print(ASCIIColors.color_bright_blue, f"Listening on :http://{self.config['host']}:{self.config['port']}")
         while True:
             try:
                 if not self.generate_queue.empty():
@@ -333,10 +337,11 @@ class ModelProcess:
                         if self.personality.processor_cfg is not None:
                             if "custom_workflow" in self.personality.processor_cfg:
                                 if self.personality.processor_cfg["custom_workflow"]:
-                                    print("Running workflow")
+                                    ASCIIColors.success("Running workflow")
                                     self.completion_signal.clear()
                                     self.start_signal.set()
-                                    output = self.personality.processor.run_workflow(self._generate, command[1], command[0], self._callback)
+                                    output = self.personality.processor.run_workflow(command[1], command[0], self._callback)
                                     self._callback(output, 0)
                                     self.completion_signal.set()
                                     self.start_signal.clear()
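The run_workflow call drops the self._generate argument: personalities are now mounted with model=self.model (see the hunks above), so a processor can generate through its personality's model instead of a server-supplied function. A sketch of the implied new hook, under the assumption that the three remaining arguments are the prompt, the accumulated discussion text, and a callback (the real interface is defined in the lollms package):

    # Assumed shape of the updated workflow hook (sketch, not the lollms source).
    class Processor:
        def __init__(self, personality):
            self.personality = personality

        def run_workflow(self, prompt, full_context, callback=None):
            # the personality carries the model handed over at mount time
            output = self.personality.model.generate(full_context + prompt)
            if callback is not None:
                callback(output, 0)
            return output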
@@ -496,25 +501,26 @@ class LoLLMsAPPI():
         # This is used to keep track of messages
         self.full_message_list = []
+        self.current_room_id = None
         # =========================================================================================
         # Socket IO stuff
         # =========================================================================================
         @socketio.on('connect')
         def connect():
-            print('Client connected')
+            ASCIIColors.success(f'Client {request.sid} connected')

         @socketio.on('disconnect')
         def disconnect():
-            print('Client disconnected')
+            ASCIIColors.error(f'Client {request.sid} disconnected')

         @socketio.on('install_model')
         def install_model(data):
+            room_id = request.sid
             def install_model_():
                 print("Install model triggered")
                 model_path = data["path"]
                 progress = 0
-                installation_dir = Path(f'./models/{self.config["binding_name"]}/')
+                installation_dir = self.lollms_paths.personal_models_path/self.config["binding_name"]
                 filename = Path(model_path).name
                 installation_path = installation_dir / filename
                 print("Model install requested")
@@ -522,18 +528,18 @@ class LoLLMsAPPI():
                 if installation_path.exists():
                     print("Error: Model already exists")
-                    socketio.emit('install_progress',{'status': 'failed', 'error': 'model already exists'})
+                    socketio.emit('install_progress',{'status': 'failed', 'error': 'model already exists'}, room=room_id)

-                socketio.emit('install_progress',{'status': 'progress', 'progress': progress})
+                socketio.emit('install_progress',{'status': 'progress', 'progress': progress}, room=room_id)

                 def callback(progress):
-                    socketio.emit('install_progress',{'status': 'progress', 'progress': progress})
+                    socketio.emit('install_progress',{'status': 'progress', 'progress': progress}, room=room_id)

                 if hasattr(self.binding, "download_model"):
                     self.binding.download_model(model_path, installation_path, callback)
                 else:
                     self.download_file(model_path, installation_path, callback)
-                socketio.emit('install_progress',{'status': 'succeeded', 'error': ''})
+                socketio.emit('install_progress',{'status': 'succeeded', 'error': ''}, room=room_id)

             tpe = threading.Thread(target=install_model_, args=())
             tpe.start()
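Two things happen in this handler: request.sid is captured as room_id while Flask's request context is still alive, and every emit then targets room=room_id so progress events reach only the client that asked for the install. The capture matters because the worker runs on a thread where the request proxy is unavailable. The pattern in isolation, with illustrative event names:

    # Room-scoped progress reporting from a worker thread (flask_socketio pattern;
    # event and variable names here are illustrative).
    import threading
    from flask import Flask, request
    from flask_socketio import SocketIO

    app = Flask(__name__)
    socketio = SocketIO(app)

    @socketio.on('long_task')
    def long_task(data):
        room_id = request.sid  # valid only inside the handler's request context

        def worker():
            # request.sid would fail here; use the captured room_id instead
            socketio.emit('progress', {'progress': 50}, room=room_id)
            socketio.emit('progress', {'progress': 100}, room=room_id)

        threading.Thread(target=worker).start()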
@@ -541,20 +547,21 @@ class LoLLMsAPPI():
         @socketio.on('uninstall_model')
         def uninstall_model(data):
             model_path = data['path']
-            installation_dir = Path(f'./models/{self.config["binding_name"]}/')
+            installation_dir = self.lollms_paths.personal_models_path/self.config["binding_name"]
             filename = Path(model_path).name
             installation_path = installation_dir / filename

             if not installation_path.exists():
-                socketio.emit('install_progress',{'status': 'failed', 'error': 'The model does not exist'})
+                socketio.emit('install_progress',{'status': 'failed', 'error': 'The model does not exist'}, room=request.sid)

             installation_path.unlink()
-            socketio.emit('install_progress',{'status': 'succeeded', 'error': ''})
+            socketio.emit('install_progress',{'status': 'succeeded', 'error': ''}, room=request.sid)

         @socketio.on('generate_msg')
         def generate_msg(data):
+            self.current_room_id = request.sid
             if self.process.model_ready.value==1:
                 if self.current_discussion is None:
                     if self.db.does_last_discussion_have_messages():
@@ -582,7 +589,7 @@ class LoLLMsAPPI():
                     "message":"",
                     "user_message_id": self.current_user_message_id,
                     "ai_message_id": self.current_ai_message_id,
-                }
+                }, room=self.current_room_id
             )

         @socketio.on('generate_msg_from')
@@ -745,6 +752,8 @@ class LoLLMsAPPI():
         """
         if message_type == MSG_TYPE.MSG_TYPE_CHUNK:
             self.bot_says += chunk
+        if message_type == MSG_TYPE.MSG_TYPE_FULL:
+            self.bot_says = chunk
         if message_type.value < 2:
             self.socketio.emit('message', {
                 'data': self.bot_says,
@@ -752,7 +761,7 @@ class LoLLMsAPPI():
                 'ai_message_id':self.current_ai_message_id,
                 'discussion_id':self.current_discussion.discussion_id,
                 'message_type': message_type.value
-            }
+            }, room=self.current_room_id
             )
             if self.cancel_gen:
                 print("Generation canceled")
@@ -780,7 +789,7 @@ class LoLLMsAPPI():
                 "message":message,#markdown.markdown(message),
                 "user_message_id": self.current_user_message_id,
                 "ai_message_id": self.current_ai_message_id,
-            }
+            }, room=self.current_room_id
         )

         # prepare query and reception
@@ -806,7 +815,7 @@ class LoLLMsAPPI():
                 'data': self.bot_says,
                 'ai_message_id':self.current_ai_message_id,
                 'parent':self.current_user_message_id, 'discussion_id':self.current_discussion.discussion_id
-            }
+            }, room=self.current_room_id
         )
         self.current_discussion.update_message(self.current_ai_message_id, self.bot_says)
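The new MSG_TYPE_FULL branch lets a personality replace the whole answer at once (for example after a workflow finishes) instead of streaming it chunk by chunk, while MSG_TYPE_CHUNK keeps appending. The accumulation logic in isolation (a sketch; the enum values are assumed, the diff only guarantees the streamed types have value < 2):

    # Chunk vs. full message accumulation (sketch; enum values assumed).
    from enum import Enum

    class MSG_TYPE(Enum):
        MSG_TYPE_CHUNK = 0  # partial text: append to the running answer
        MSG_TYPE_FULL = 1   # complete text: overwrite the running answer

    class Accumulator:
        def __init__(self):
            self.bot_says = ""

        def callback(self, chunk, message_type):
            if message_type == MSG_TYPE.MSG_TYPE_CHUNK:
                self.bot_says += chunk
            if message_type == MSG_TYPE.MSG_TYPE_FULL:
                self.bot_says = chunk
            return self.bot_says

    acc = Accumulator()
    acc.callback("Hel", MSG_TYPE.MSG_TYPE_CHUNK)
    acc.callback("lo", MSG_TYPE.MSG_TYPE_CHUNK)   # bot_says == "Hello"
    acc.callback("Hi", MSG_TYPE.MSG_TYPE_FULL)    # bot_says == "Hi"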

app.py (33 changes)

@@ -40,12 +40,8 @@ from flask import (
 )
 from flask_socketio import SocketIO, emit
 from pathlib import Path
-import gc
 import yaml
 from geventwebsocket.handler import WebSocketHandler
 from gevent.pywsgi import WSGIServer
-import requests
-from concurrent.futures import ThreadPoolExecutor, as_completed
 import logging
 import psutil
 from lollms.binding import LOLLMSConfig
@@ -569,8 +565,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
             personalities = [f.stem for f in personalities_dir.iterdir() if f.is_dir()]
         except Exception as ex:
             personalities=[]
-            if self.config["debug"]:
-                print(f"No personalities found. Using default one {ex}")
+            ASCIIColors.error(f"No personalities found. Using default one {ex}")
         return jsonify(personalities)

     def list_languages(self):
@@ -1109,6 +1104,8 @@ def sync_cfg(default_config, config):
             del config.config[key]
             removed_entries.append(key)

+    config["version"]=default_config["version"]
+
     return config, added_entries, removed_entries

 if __name__ == "__main__":
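Stamping config["version"] after the merge makes the upgrade idempotent: the next launch sees a current version and skips the sync. The overall shape of sync_cfg, with the merging loops paraphrased from the visible fragments (only the version stamp and the return are shown in this diff):

    # Sketch of sync_cfg (paraphrased; only the tail of the function is visible here).
    def sync_cfg(default_config, config):
        added_entries, removed_entries = [], []
        # add keys the default config gained
        for key, value in default_config.items():
            if key not in config.config:
                config.config[key] = value
                added_entries.append(key)
        # drop keys the default config no longer has
        for key in list(config.config):
            if key not in default_config:
                del config.config[key]
                removed_entries.append(key)
        # mark the config as upgraded so the sync is not re-run next launch
        config["version"] = default_config["version"]
        return config, added_entries, removed_entries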
@@ -1184,26 +1181,8 @@ if __name__ == "__main__":
     )
     args = parser.parse_args()

-    # The default configuration must be kept unchanged as it is committed to the repository,
-    # so we have to make a copy that is not comitted
-    default_config = load_config("configs/config.yaml")
-
-    if args.config!="local_config":
-        args.config = "local_config"
-        if not lollms_paths.personal_configuration_path/f"local_config.yaml".exists():
-            print("No local configuration file found. Building from scratch")
-            shutil.copy(default_config, lollms_paths.personal_configuration_path/f"local_config.yaml")
-
-    config_file_path = lollms_paths.personal_configuration_path/f"local_config.yaml"
-    config = LOLLMSConfig(config_file_path)
-
-    if "version" not in config or int(config["version"])<int(default_config["version"]):
-        #Upgrade old configuration files to new format
-        print("Configuration file is very old. Replacing with default configuration")
-        config, added, removed =sync_cfg(default_config, config)
-        print(f"Added entries : {added}, removed entries:{removed}")
-        config.save_config(config_file_path)
-
+    # Configuration loading part
+    config = LOLLMSConfig.autoload(lollms_paths)

     # Override values in config with command-line arguments
     for arg_name, arg_value in vars(args).items():
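LOLLMSConfig.autoload collapses the previous twenty-odd lines of local_config bookkeeping: it locates (or creates) the personal configuration and remembers where it was loaded from, which is why the LoLLMsWebUI constructor in the next hunk can pass config.file_path instead of a separately tracked config_file_path. Assumed usage, based only on the calls visible in this commit:

    # Assumed usage of the new loader (autoload, file_path and save_config are the
    # calls visible in this commit; everything else here is illustrative).
    from lollms.binding import LOLLMSConfig
    from lollms.paths import LollmsPaths

    lollms_paths = LollmsPaths.find_paths()       # illustrative; path discovery assumed
    config = LOLLMSConfig.autoload(lollms_paths)  # find or build the personal config
    print(config["binding_name"], config["model_name"])
    config.save_config(config.file_path)          # persist any overrides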
@@ -1212,7 +1191,7 @@ if __name__ == "__main__":
     # executor = ThreadPoolExecutor(max_workers=1)
     # app.config['executor'] = executor

-    bot = LoLLMsWebUI(app, socketio, config, config_file_path, lollms_paths)
+    bot = LoLLMsWebUI(app, socketio, config, config.file_path, lollms_paths)

     # chong Define custom WebSocketHandler with error handling
     class CustomWebSocketHandler(WebSocketHandler):


@@ -1,7 +1,7 @@
 # =================== Lord Of Large Language Models Configuration file ===========================
 version: 7
 binding_name: llama_cpp_official
-model_name: Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_0.bin
+model_name: airoboros-13b-gpt4.ggmlv3.q4_0.bin

 # Host information
 host: localhost


@@ -10,3 +10,4 @@ gevent-websocket
 pyaipersonality>=0.0.14
 lollms
 langchain
+requests

web/dist/assets/index-29d93ec2.css (vendored, new file)

File diff suppressed because one or more lines are too long.

web/dist/index.html (vendored, 4 changes)

@@ -6,8 +6,8 @@
     <meta name="viewport" content="width=device-width, initial-scale=1.0">
     <title>GPT4All - WEBUI</title>
-    <script type="module" crossorigin src="/assets/index-9a571523.js"></script>
-    <link rel="stylesheet" href="/assets/index-940fed0d.css">
+    <script type="module" crossorigin src="/assets/index-ce2e3117.js"></script>
+    <link rel="stylesheet" href="/assets/index-29d93ec2.css">
 </head>
 <body>
     <div id="app"></div>

web/src/clip (new submodule)

@@ -0,0 +1 @@
+Subproject commit a9b1bf5920416aaeaec965c25dd9e8f98c864f16


@@ -218,21 +218,22 @@
                         <div v-if="configFile.model_name" class="mr-2">|</div>
-                        <div v-if="configFile.model_name"
-                            class=" text-base font-semibold cursor-pointer select-none items-center">
+                        <div v-if="configFile.model_name" class="text-base font-semibold cursor-pointer select-none items-center">
                             <div class="flex gap-1 items-center">
                                 <img :src="imgModel" class="w-8 h-8 rounded-lg object-fill">
                                 <h3 class="font-bold font-large text-lg line-clamp-1">
                                     {{ configFile.model_name }}
                                 </h3>
+                                <button @click.stop="showInputDialog" class="text-base hover:text-primary-dark ml-1 bg-bg-light-tone dark:bg-bg-dark-tone hover:bg-bg-dark-tone duration-200 rounded-lg px-2 py-1">
+                                    +
+                                </button>
                             </div>
                         </div>
                     </div>
                 </button>
             </div>
             <div :class="{ 'hidden': mzc_collapsed }" class="flex flex-col mb-2 px-3 pb-0">
                 <div v-if="models.length > 0" class="mb-2">
<div v-if="models.length > 0" class="mb-2">
@@ -587,6 +588,17 @@
         transform: scale(1);
     }
 }
+
+.bg-primary-light {
+    background-color: aqua
+}
+
+.hover\:bg-primary-light:hover {
+    background-color: aquamarine
+}
+
+.font-bold {
+    font-weight: bold;
+}
 </style>
 <script>
 import filesize from '../plugins/filesize'
<script>
import filesize from '../plugins/filesize'
@@ -619,6 +631,9 @@ export default {
     data() {
         return {
+            // install custom model
+            showModelInputDialog: false,
+            modelPath: '',
             // Zoo stuff
             models: [],
             personalities: [],
@@ -661,6 +676,32 @@ export default {
     created() {
     }, methods: {
+        showInputDialog() {
+            console.log("Input dialog shown")
+            this.showModelInputDialog = true;
+        },
+        closeInputDialog() {
+            this.showModelInputDialog = false;
+            this.modelPath = '';
+        },
+        validateModelPath() {
+            // Perform validation of the model path (e.g., checking if it is a local file or internet link)
+            // ...
+            // Trigger the `download_model` endpoint with the path as a POST
+            this.$axios.post('/download_model', { path: this.modelPath })
+                .then(response => {
+                    // Handle the response
+                    // ...
+                })
+                .catch(error => {
+                    // Handle the error
+                    // ...
+                });
+            // Close the input dialog
+            this.closeInputDialog();
+        },
         collapseAll(val) {
             this.bec_collapsed = val
             this.mzc_collapsed = val
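validateModelPath posts the typed path to a /download_model endpoint and closes the dialog; response handling is left as stubs. The server side is not part of this diff; a hypothetical Flask counterpart, named only after the axios call above, could look like:

    # Hypothetical /download_model handler (assumption: this commit only adds the
    # client side; the endpoint name is taken from the axios call above).
    from flask import Flask, request, jsonify

    app = Flask(__name__)

    @app.route('/download_model', methods=['POST'])
    def download_model():
        data = request.get_json(silent=True) or {}
        path = data.get('path', '')
        if not path:
            return jsonify({'status': False, 'error': 'no path provided'}), 400
        # validation and the actual download would be queued here
        return jsonify({'status': True})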
@@ -897,7 +938,6 @@ export default {
         this.api_get_req("list_models").then(response => { this.modelsArr = response })
         //this.api_get_req("list_personalities_languages").then(response => { this.persLangArr = response })
         this.api_get_req("list_personalities_categories").then(response => { this.persCatgArr = response })
-        this.api_get_req("list_personalities").then(response => { this.persArr = response })
         //this.api_get_req("list_languages").then(response => { this.langArr = response })
         this.api_get_req("get_config").then(response => {
             console.log("Received config")
@@ -920,6 +960,10 @@ export default {
             this.configFile.personality_folder = response["personality_name"]
             console.log("received infos")
         });
+        this.api_get_req("list_personalities").then(response => {
+            this.persArr = response
+            console.log(`Listed personalities:\n${response}`)
+        })
        this.api_get_req("disk_usage").then(response => {
             this.diskUsage = response
         })