Merge branch 'main' into lollms-pers-mounter

AndzejsP 2023-06-10 11:39:03 +03:00
commit 17cc14de3d
12 changed files with 144 additions and 79 deletions


@@ -12,8 +12,9 @@ from api.db import DiscussionsDB
from api.helpers import compare_lists
from pathlib import Path
import importlib
from lollms import AIPersonality, lollms_path, MSG_TYPE
from lollms import AIPersonality, MSG_TYPE
from lollms.binding import BindingConfig
from lollms.paths import lollms_path, lollms_personal_configuration_path, lollms_personal_path, lollms_personal_models_path, lollms_bindings_zoo_path, lollms_personalities_zoo_path, lollms_default_cfg_path
import multiprocessing as mp
import threading
import time
@@ -91,7 +92,7 @@ class ModelProcess:
self.set_config_queue = mp.Queue(maxsize=1)
self.set_config_result_queue = mp.Queue(maxsize=1)
self.models_path = Path('models')
self.models_path = lollms_personal_models_path
self.process = None
# Create synchronization objects
@@ -149,7 +150,7 @@ class ModelProcess:
module = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(module)
if hasattr(module, "Install"):
module.Install(self)
module.Install(self.config)
# define the full absolute path to the module
absolute_path = binding_path.resolve()
@@ -222,7 +223,7 @@ class ModelProcess:
self.binding = self.load_binding(self.config["binding_name"], install=True)
print("Binding loaded successfully")
try:
model_file = self.models_path/self.config["binding_name"]/self.config["model_name"]
model_file = self.config.models_path/self.config["binding_name"]/self.config["model_name"]
print(f"Loading model : {model_file}")
self.model = self.binding(self.config)
self.model_ready.value = 1
@@ -252,7 +253,7 @@ class ModelProcess:
for personality in self.config['personalities']:
try:
print(f" {personality}")
personality_path = lollms_path/f"personalities_zoo/{personality}"
personality_path = lollms_personalities_zoo_path/f"{personality}"
personality = AIPersonality(personality_path, run_scripts=False)
mounted_personalities.append(personality)
except Exception as ex:
@@ -293,6 +294,7 @@ class ModelProcess:
self._set_config_result['personalities_status'] ='semi_failed'
self.personality = self.mounted_personalities[self.config['active_personality_id']]
self.mounted_personalities = self.config["personalities"]
print("Personality set successfully")
def _run(self):
@@ -347,6 +349,7 @@ class ModelProcess:
self.start_signal.clear()
print("Finished executing the generation")
except Exception as ex:
print("Couldn't start generation")
print(ex)
time.sleep(1)
def _generate(self, prompt, n_predict=50, callback=None):
@@ -464,10 +467,9 @@ class LoLLMsAPPI():
#Create and launch the process
self.process = ModelProcess(config)
self.config = config
self.binding = self.process.rebuild_binding(self.config)
self.personalities = self.process.rebuild_personalities()
self.personality = self.personalities[self.config["active_personality_id"]]
self.mounted_personalities = self.process.rebuild_personalities()
self.personality = self.mounted_personalities[self.config["active_personality_id"]]
if config["debug"]:
print(f"{self.personality}")
self.config_file_path = config_file_path
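Note: the hunks above all apply one pattern: locations that used to be hard-coded relative to the repository (Path('models'), lollms_path/"personalities_zoo") are rebuilt from the per-user helpers imported from lollms.paths. A minimal sketch of that pattern, not part of the commit, assuming the lollms.paths constants are pathlib.Path objects (as their use with the / operator suggests); resolve_model_file and resolve_personality_folder are illustrative names only:

from pathlib import Path
from lollms.paths import lollms_personal_models_path, lollms_personalities_zoo_path

def resolve_model_file(config: dict) -> Path:
    # Previously rooted in a hard-coded Path('models'); now rooted in the
    # per-user models folder so downloads live outside the repository.
    return lollms_personal_models_path / config["binding_name"] / config["model_name"]

def resolve_personality_folder(personality: str) -> Path:
    # Previously lollms_path / f"personalities_zoo/{personality}".
    return lollms_personalities_zoo_path / personality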

app.py

@@ -26,6 +26,7 @@ import subprocess
import signal
from lollms import AIPersonality, lollms_path, MSG_TYPE
from lollms.console import ASCIIColors
from lollms.paths import lollms_default_cfg_path, lollms_bindings_zoo_path, lollms_personalities_zoo_path, lollms_personal_path, lollms_personal_configuration_path, lollms_personal_models_path
from api.db import DiscussionsDB, Discussion
from api.helpers import compare_lists
from flask import (
@@ -140,6 +141,8 @@ class LoLLMsWebUI(LoLLMsAPPI):
self.add_endpoint("/personalities/<path:filename>", "serve_personalities", self.serve_personalities, methods=["GET"])
self.add_endpoint("/outputs/<path:filename>", "serve_outputs", self.serve_outputs, methods=["GET"])
self.add_endpoint("/data/<path:filename>", "serve_data", self.serve_data, methods=["GET"])
self.add_endpoint("/help/<path:filename>", "serve_help", self.serve_help, methods=["GET"])
self.add_endpoint("/uploads/<path:filename>", "serve_uploads", self.serve_uploads, methods=["GET"])
@@ -292,7 +295,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
return jsonify({"personality":self.personality.as_dict()})
def get_all_personalities(self):
personalities_folder = lollms_path/"personalities_zoo"
personalities_folder = lollms_personalities_zoo_path
personalities = {}
for language_folder in personalities_folder.iterdir():
lang = language_folder.stem
@@ -429,9 +432,16 @@ class LoLLMsWebUI(LoLLMsAPPI):
elif setting_name== "personality_folder":
self.personality_name=data['setting_value']
personality_fn = lollms_path/f"personalities_zoo/{self.personality_language}/{self.personality_category}/{self.personality_name}"
self.personality.load_personality(personality_fn)
if len(self.config["personalities"])>0:
if self.config["active_personality_id"]<len(self.config["personalities"]):
self.config["personalities"][self.config["active_personality_id"]] = f"{self.personality_language}/{self.personality_category}/{self.personality_name}"
else:
self.config["active_personality_id"] = 0
self.config["personalities"][self.config["active_personality_id"]] = f"{self.personality_language}/{self.personality_category}/{self.personality_name}"
personality_fn = lollms_personalities_zoo_path/self.config["personalities"][self.config["active_personality_id"]]
self.personality.load_personality(personality_fn)
else:
self.config["personalities"].append(f"{self.personality_language}/{self.personality_category}/{self.personality_name}")
elif setting_name== "override_personality_model_parameters":
self.config["override_personality_model_parameters"]=bool(data['setting_value'])
@@ -493,7 +503,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
current_drive = Path.cwd().anchor
drive_disk_usage = psutil.disk_usage(current_drive)
try:
models_folder_disk_usage = psutil.disk_usage(f'./models/{self.config["binding_name"]}')
models_folder_disk_usage = psutil.disk_usage(lollms_personal_models_path/f'{self.config["binding_name"]}')
return jsonify({
"total_space":drive_disk_usage.total,
"available_space":drive_disk_usage.free,
@@ -511,7 +521,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
})
def list_bindings(self):
bindings_dir = lollms_path/'bindings_zoo' # replace with the actual path to the models folder
bindings_dir = lollms_bindings_zoo_path # replace with the actual path to the models folder
bindings=[]
for f in bindings_dir.iterdir():
card = f/"binding_card.yaml"
@@ -520,7 +530,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
bnd = load_config(card)
bnd["folder"]=f.stem
icon_path = Path(f"bindings/{f.name}/logo.png")
if Path(lollms_path/f"bindings_zoo/{f.name}/logo.png").exists():
if Path(lollms_bindings_zoo_path/f"{f.name}/logo.png").exists():
bnd["icon"]=str(icon_path)
bindings.append(bnd)
@@ -538,18 +548,18 @@ class LoLLMsWebUI(LoLLMsAPPI):
def list_personalities_languages(self):
personalities_languages_dir = lollms_path/f'personalities_zoo' # replace with the actual path to the models folder
personalities_languages_dir = lollms_personalities_zoo_path # replace with the actual path to the models folder
personalities_languages = [f.stem for f in personalities_languages_dir.iterdir() if f.is_dir()]
return jsonify(personalities_languages)
def list_personalities_categories(self):
personalities_categories_dir = lollms_path/f'personalities_zoo/{self.personality_language}' # replace with the actual path to the models folder
personalities_categories_dir = lollms_personalities_zoo_path/f'{self.personality_language}' # replace with the actual path to the models folder
personalities_categories = [f.stem for f in personalities_categories_dir.iterdir() if f.is_dir()]
return jsonify(personalities_categories)
def list_personalities(self):
try:
personalities_dir = lollms_path/f'personalities_zoo/{self.personality_language}/{self.personality_category}' # replace with the actual path to the models folder
personalities_dir = lollms_personalities_zoo_path/f'{self.personality_language}/{self.personality_category}' # replace with the actual path to the models folder
personalities = [f.stem for f in personalities_dir.iterdir() if f.is_dir()]
except Exception as ex:
personalities=[]
@@ -617,34 +627,46 @@ class LoLLMsWebUI(LoLLMsAPPI):
return send_from_directory(path, fn)
def serve_bindings(self, filename):
path = str(lollms_path/('bindings_zoo/'+"/".join(filename.split("/")[:-1])))
path = str(lollms_bindings_zoo_path/("/".join(filename.split("/")[:-1])))
fn = filename.split("/")[-1]
return send_from_directory(path, fn)
def serve_personalities(self, filename):
path = str(lollms_path/('personalities_zoo/'+"/".join(filename.split("/")[:-1])))
path = str(lollms_personalities_zoo_path/("/".join(filename.split("/")[:-1])))
fn = filename.split("/")[-1]
return send_from_directory(path, fn)
def serve_outputs(self, filename):
root_dir = os.getcwd()
path = os.path.join(root_dir, 'outputs/')+"/".join(filename.split("/")[:-1])
root_dir = lollms_personal_path / "outputs"
root_dir.mkdir(exist_ok=True, parents=True)
path = str(root_dir/"/".join(filename.split("/")[:-1]))
fn = filename.split("/")[-1]
return send_from_directory(path, fn)
def serve_help(self, filename):
root_dir = Path(__file__).parent/f"help"
root_dir.mkdir(exist_ok=True, parents=True)
path = str(root_dir/"/".join(filename.split("/")[:-1]))
fn = filename.split("/")[-1]
return send_from_directory(path, fn)
def serve_data(self, filename):
root_dir = os.getcwd()
path = os.path.join(root_dir, 'data/')+"/".join(filename.split("/")[:-1])
root_dir = lollms_personal_path / "data"
root_dir.mkdir(exist_ok=True, parents=True)
path = str(root_dir/"/".join(filename.split("/")[:-1]))
fn = filename.split("/")[-1]
return send_from_directory(path, fn)
def serve_uploads(self, filename):
root_dir = os.getcwd()
path = os.path.join(root_dir, 'uploads/')+"/".join(filename.split("/")[:-1])
root_dir = lollms_personal_path / "uploads"
root_dir.mkdir(exist_ok=True, parents=True)
path = str(root_dir/"/".join(filename.split("/")[:-1]))
fn = filename.split("/")[-1]
return send_from_directory(path, fn)
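Note: serve_outputs, serve_data and serve_uploads above share one pattern: the served root moves from the current working directory to a subfolder of the per-user lollms folder and is created on demand (serve_help instead serves from a help folder next to app.py). A hypothetical consolidation of that pattern, not present in the codebase, assuming lollms_personal_path is a pathlib.Path:

from flask import send_from_directory
from lollms.paths import lollms_personal_path

def serve_personal_folder(subfolder, filename):
    # Hypothetical helper mirroring the serve_* methods above.
    root_dir = lollms_personal_path / subfolder
    root_dir.mkdir(exist_ok=True, parents=True)
    parts = filename.split("/")
    return send_from_directory(str(root_dir.joinpath(*parts[:-1])), parts[-1])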
@@ -702,7 +724,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
if config_file.exists():
self.config["personalities"].append(package_path)
self.personalities = self.process.rebuild_personalities()
self.personality = self.personalities[self.config["active_personality_id"]]
self.personality = self.mounted_personalities[self.config["active_personality_id"]]
self.apply_settings()
return jsonify({"status": True,
"personalities":self.config["personalities"],
@@ -730,7 +752,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
self.config["active_personality_id"]=0
if len(self.config["personalities"])>0:
self.personalities = self.process.rebuild_personalities()
self.personality = self.personalities[self.config["active_personality_id"]]
self.personality = self.mounted_personalities[self.config["active_personality_id"]]
else:
self.personalities = []
self.personality = None
@@ -747,7 +769,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
id = request.files['id']
if id<len(self.config["personalities"]):
self.config["active_personality_id"]=id
self.personality = self.personalities[self.config["active_personality_id"]]
self.personality = self.mounted_personalities[self.config["active_personality_id"]]
self.apply_settings()
return jsonify({"status": True})
else:
@@ -962,7 +984,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
path = f'{server}{filename}'
else:
path = f'{server}/{filename}'
local_path = Path(f'./models/{self.config["binding_name"]}/{filename}')
local_path = lollms_personal_models_path/f'{self.config["binding_name"]}/{filename}'
is_installed = local_path.exists() or model_type.lower()=="api"
models.append({
'title': filename,
@@ -1133,16 +1155,16 @@ if __name__ == "__main__":
# The default configuration must be kept unchanged as it is committed to the repository,
# so we have to make a copy that is not committed
default_config = load_config(f"configs/config.yaml")
default_config = load_config("configs/config.yaml")
if args.config!="local_config":
args.config = "local_config"
if not Path(f"configs/local_config.yaml").exists():
if not (lollms_personal_configuration_path/"local_config.yaml").exists():
print("No local configuration file found. Building from scratch")
shutil.copy(f"configs/config.yaml", f"configs/local_config.yaml")
shutil.copy("configs/config.yaml", lollms_personal_configuration_path/"local_config.yaml")
config_file_path = f"configs/{args.config}.yaml"
config = BindingConfig(config_file_path, Path("./models"))
config_file_path = lollms_personal_configuration_path/f"local_config.yaml"
config = BindingConfig(config_file_path)
if "version" not in config or int(config["version"])<int(default_config["version"]):

help/faqs.csv (new file)

@@ -0,0 +1,10 @@
question,answer
What is GPT4ALL WebUI?,GPT4ALL WebUI is a user-friendly interface that provides access to various Large Language Model (LLM) models for a wide range of tasks.
What are the features of GPT4ALL WebUI?,The features of GPT4ALL WebUI include:<br>- Choosing preferred binding, model, and personality<br>- Enhancing emails, essays, code debugging, thought organization, and more<br>- Exploring functionalities like searching, data organization, and image generation<br>- Easy-to-use UI with light and dark mode options<br>- Integration with GitHub repository<br>- Support for different personalities with predefined welcome messages<br>- Thumb up/down rating for generated answers<br>- Copy, edit, and remove messages<br>- Local database storage for discussions<br>- Search, export, and delete multiple discussions<br>- Support for Docker, conda, and manual virtual environment setups
Where can I find a tutorial on how to use the tool?,You can find a tutorial on how to use the tool on our YouTube channel. Click here to watch the tutorial: <a href="https://youtu.be/ds_U0TDzbzI">YouTube Tutorial</a>
What are the prerequisites for installing GPT4ALL WebUI?,The prerequisites for installing GPT4ALL WebUI are:<br>- Python 3.10 or higher<br>- Git (for cloning the repository)<br>Make sure Python is in your system's PATH and verify the Python version by running 'python --version' in the terminal.
How do I install GPT4ALL WebUI?,There are different installation methods for GPT4ALL WebUI:<br>1. Easy install:<br>- Download the appropriate launcher for your platform (webui.bat for Windows, webui.sh for Linux).<br>- Place the launcher in a folder of your choice.<br>- Run the launcher script.<br>2. Using Conda:<br>- Clone the project or download the zip file from the GitHub repository.<br>- Create a new conda environment, activate it, and install the requirements.<br>3. Using Docker:<br>- Refer to the Docker documentation for installation instructions specific to your operating system.
How do I launch the app?,You can launch the app by running the webui.sh or webui.bat launcher. Alternatively, you can activate the virtual environment and launch the application using 'python app.py' from the project's root directory.
How do I select a model and binding?,To select a model and binding:<br>- Open the GPT4ALL WebUI and go to the Settings page.<br>- In the Models Zoo tab, choose a binding from the list.<br>- Wait for the installation process to finish and click the Install button next to the desired model.<br>- After the model installation is complete, select the model and apply the changes.<br>- Don't forget to save the configuration.
How do I start a discussion?,To start a discussion:<br>- Go to the Discussions view.<br>- Click the + button to create a new discussion.<br>- You will see a predefined welcome message based on the selected personality.<br>- Ask a question or provide an initial prompt to start the discussion.<br>- You can stop the generation process at any time by pressing the Stop Generating button.
How do I manage discussions?,To manage discussions:<br>- To edit a discussion title, simply type a new title or modify the existing one.<br>- To delete a discussion, click the Delete button.<br>- To search for specific discussions, use the search button and enter relevant keywords.<br>- To perform batch operations (exporting
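Note: these rows are served by the new /help/<path:filename> endpoint and parsed client-side with Papa.parse, so the file must keep exactly the question and answer columns; answers containing commas or double quotes need standard CSV quoting, otherwise the parser splits them into extra fields. A quick validation sketch, not part of the repository, that flags such rows:

import csv
from pathlib import Path

# Illustration: check that every row of help/faqs.csv parses into exactly the
# two columns the HelpView component expects, and that neither field is empty.
with Path("help/faqs.csv").open(newline="", encoding="utf-8") as f:
    for line_no, row in enumerate(csv.DictReader(f), start=2):
        assert set(row) == {"question", "answer"}, f"unexpected columns on line {line_no}"
        assert row["question"] and row["answer"], f"empty field on line {line_no}"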

web/dist/assets/index-940fed0d.css (new vendored file)
File diff suppressed because one or more lines are too long (this and two other vendored asset files).

web/dist/index.html vendored

@@ -6,8 +6,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>GPT4All - WEBUI</title>
<script type="module" crossorigin src="/assets/index-3f0fa85c.js"></script>
<link rel="stylesheet" href="/assets/index-ec14e823.css">
<script type="module" crossorigin src="/assets/index-9a571523.js"></script>
<link rel="stylesheet" href="/assets/index-940fed0d.css">
</head>
<body>
<div id="app"></div>


@@ -29,7 +29,7 @@
<b>Manual download:&nbsp;</b>
<a :href="path" @click.stop class="flex items-center hover:text-secondary duration-75 active:scale-90"
title="Download this manually (faster) and put it in the models/<your binding> folder then refresh">
title="Download this manually (faster) and put it in the models/<your binding> folder under your home directory/Documents/lollms folder then refresh">
{{ title }}
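Note: the updated tooltip points manual downloads at the per-user lollms folder rather than the repository's models directory. A small illustration, not part of the commit, assuming lollms_personal_models_path resolves to that models root under Documents/lollms (manual_download_destination is an illustrative name):

from lollms.paths import lollms_personal_models_path

def manual_download_destination(binding_name, filename):
    # Tooltip above: put the manually downloaded file in
    # <home>/Documents/lollms/models/<your binding>/ then refresh.
    return lollms_personal_models_path / binding_name / filename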


@@ -1,12 +1,12 @@
<template>
<div class="container mx-auto p-4 bg-bg-light-tone dark:bg-bg-dark-tone shadow-lg">
<div class="mb-8">
<div class="mb-8 overflow-y-auto max-h-96 scrollbar">
<h2 class="text-2xl font-bold mb-2">Frequently Asked Questions</h2>
<ul class="list-disc pl-4">
<li v-for="(faq, index) in faqs" :key="index">
<h3 class="text-xl font-bold mb-1">{{ faq.question }}</h3>
<p class="mb-4">{{ faq.answer }}</p>
<p class="mb-4" v-html="parseMultiline(faq.answer)"></p>
</li>
</ul>
</div>
@@ -41,20 +41,51 @@
methods: {
loadFAQs() {
// Fetch and parse the CSV file
fetch('/data/faqs.csv')
fetch('/help/faqs.csv')
.then((response) => response.text())
.then((csv) => {
const { data } = Papa.parse(csv, { header: true }); // Parse the CSV and extract data
console.log("Recovered data")
console.log(data)
this.faqs = data; // Assign the parsed data to the faqs array
})
.catch((error) => {
console.error('Error loading FAQs:', error);
});
},
parseMultiline(text) {
// Replace newline characters with HTML line breaks
return text.replace(/\n/g, '<br>');
},
},
};
</script>
<style scoped>
.scrollbar {
/* Add your scrollbar container styles here */
scrollbar-width: thin;
scrollbar-color: var(--scrollbar-thumb-color) var(--scrollbar-track-color);
white-space: pre-wrap; /* Preserve line breaks */
overflow-wrap: break-word; /* Break words if necessary */
}
.scrollbar::-webkit-scrollbar {
width: 8px;
}
.scrollbar::-webkit-scrollbar-track {
background-color: var(--scrollbar-track-color);
}
.scrollbar::-webkit-scrollbar-thumb {
background-color: var(--scrollbar-thumb-color);
border-radius: 4px;
}
.scrollbar::-webkit-scrollbar-thumb:hover {
background-color: var(--scrollbar-thumb-hover-color);
}
</style>