Mirror of https://github.com/ParisNeo/lollms-webui.git, synced 2024-12-24 06:36:37 +00:00

Commit ac7afbb5a6 (parent e868dc09cf): Upgraded the hardware management
@@ -1574,7 +1574,7 @@ class LoLLMsAPI(LollmsApplication):
                self.warning("Couldn't add long term memory information to the context. Please verify the vector database")
        # Add information about the user
        user_description=""
        if self.config.use_user_name_in_discussions:
            user_description="!@>User description:\n"+self.config.user_description
            user_description="!@>User description:\n"+self.config.user_description+"\n"


        # Tokenize the conditioning text and calculate its number of tokens
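The functional change in this hunk is the trailing "\n" appended to the user description. A minimal sketch of why that newline matters when prompt segments are concatenated (the strings below are illustrative, not the project's exact prompt):

    # Without a trailing newline, the next prompt segment is glued onto the
    # user description on the same line, so its "!@>" header is no longer
    # at the start of a line where the parser expects it.
    user_description = "!@>User description:\nAlice, a researcher"
    next_segment = "!@>system: Be concise."

    fused = user_description + next_segment              # "...researcher!@>system: ..."
    separated = user_description + "\n" + next_segment   # header starts its own line
    print(fused)
    print(separated)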
@@ -1971,6 +1971,13 @@ class LoLLMsAPI(LollmsApplication):
                dt=1
            spd = self.nb_received_tokens/dt
            ASCIIColors.green(f"Received {self.nb_received_tokens} tokens (speed: {spd:.2f}t/s) ",end="\r",flush=True)
            antiprompt = self.personality.detect_antiprompt(self.connections[client_id]["generated_text"])
            if antiprompt:
                ASCIIColors.warning(f"\nDetected hallucination with antiprompt: {antiprompt}")
                self.connections[client_id]["generated_text"] = self.remove_text_from_string(self.connections[client_id]["generated_text"],antiprompt)
                self.update_message(client_id, self.connections[client_id]["generated_text"], parameters, metadata, None, MSG_TYPE.MSG_TYPE_FULL)
                return False

            self.update_message(client_id, chunk, parameters, metadata, ui=None, msg_type=message_type)
            return True
        # Stream the generated text to the frontend
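This new block measures generation speed and aborts the stream when an antiprompt (a hallucinated next turn) appears in the generated text. A self-contained sketch of the same callback pattern, with simplified stand-ins for the personality helpers (not the lollms signatures):

    import time

    def remove_text_from_string(text: str, marker: str) -> str:
        # Cut everything from the first occurrence of the marker onward.
        idx = text.lower().find(marker.lower())
        return text[:idx] if idx >= 0 else text

    start = time.time()
    generated_text = ""
    spd = 0.0
    for i, chunk in enumerate(["Hello ", "world. ", "!@>user: oops"]):
        generated_text += chunk
        dt = max(time.time() - start, 1e-6)   # guard against dt == 0, like the dt=1 fallback above
        spd = (i + 1) / dt                    # tokens per second
        if "!@>user:" in generated_text:      # the model started writing the user's turn
            generated_text = remove_text_from_string(generated_text, "!@>user:")
            break
    print(f"{generated_text!r} at {spd:.2f}t/s")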
@@ -1983,6 +1990,7 @@ class LoLLMsAPI(LollmsApplication):
        if self.personality.processor is not None:
            ASCIIColors.info("Running workflow")
            try:
                self.personality.callback = callback
                self.personality.processor.run_workflow( prompt, full_prompt, callback)
            except Exception as ex:
                trace_exception(ex)
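The added line stores the callback on the personality before the workflow runs, so workflow steps can stream partial output through it. A minimal sketch of the pattern, with dummy classes standing in for the lollms types:

    import traceback

    class Processor:
        def run_workflow(self, prompt, full_prompt, callback):
            callback("step 1 done")   # workflow steps report through the callback

    class Personality:
        def __init__(self):
            self.processor = Processor()
            self.callback = None

    def generate(personality, prompt, full_prompt, callback):
        if personality.processor is not None:
            try:
                personality.callback = callback   # the line this hunk adds
                personality.processor.run_workflow(prompt, full_prompt, callback)
            except Exception:
                traceback.print_exc()             # plays the role of trace_exception(ex)

    generate(Personality(), "hi", "!@>user: hi", print)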
15
app.py
@@ -1254,7 +1254,7 @@ try:
                ASCIIColors.green("PyTorch uninstalled successfully")
                reinstall_pytorch_with_cuda()
                ASCIIColors.yellow("Installing pytorch with cuda support")
                self.config.enable_gpu=True
                self.config.hardware_mode="nvidia-tensorcores"
                return jsonify({'status':res==0})
@@ -1992,7 +1992,7 @@ try:
            return jsonify({"state":True})

        def start_training(self):
            if self.config.enable_gpu:
            if self.config.hardware_mode=="nvidia-tensorcores" or self.config.hardware_mode=="nvidia" or self.config.hardware_mode=="apple-intel" or self.config.hardware_mode=="apple-silicon":
                if not self.lollms_paths.gptqlora_path.exists():
                    # Clone the repository to the target path
                    ASCIIColors.info("No gptqlora found in your personal space.\nCloning the gptqlora repo")
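The gating condition grows from a single enable_gpu flag to a four-way or-chain on hardware_mode. An equivalent membership test reads better; this is a suggested refactor, not what the commit ships:

    # Same gate as the or-chain above, written as a set membership test.
    TRAINING_CAPABLE_MODES = {"nvidia-tensorcores", "nvidia", "apple-intel", "apple-silicon"}

    def can_train(hardware_mode: str) -> bool:
        return hardware_mode in TRAINING_CAPABLE_MODES

    print(can_train("nvidia"))       # True
    print(can_train("cpu-noavx"))    # False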
@@ -2748,17 +2748,6 @@ try:
    if not user_avatar_path.exists():
        # If the user avatar doesn't exist, copy the default avatar from the assets folder
        shutil.copy(default_user_avatar, user_avatar_path)
    # executor = ThreadPoolExecutor(max_workers=1)
    # app.config['executor'] = executor
    # Check if .no_gpu file exists
    no_gpu_file = Path('.no_gpu')
    if no_gpu_file.exists():
        # If the file exists, change self.config.use_gpu to False
        config.enable_gpu = False
        config.save_config()

        # Remove the .no_gpu file
        no_gpu_file.unlink()

    bot = LoLLMsWebUI(args, app, socketio, config, config.file_path, lollms_paths)
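This hunk deletes the legacy .no_gpu handling: the marker file is superseded by the explicit hardware_mode setting. For reference, a self-contained sketch of what the removed one-shot migration did (Config here is a stand-in for the lollms config object):

    from pathlib import Path

    class Config:
        enable_gpu = True
        def save_config(self):
            print("config saved")

    config = Config()
    no_gpu_file = Path(".no_gpu")
    if no_gpu_file.exists():
        config.enable_gpu = False   # honor the marker once...
        config.save_config()
        no_gpu_file.unlink()        # ...then consume it so it never applies again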
@@ -1,5 +1,5 @@
# =================== Lord Of Large Language Models Configuration file ===========================
version: 39
version: 40
binding_name: null
model_name: null
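The bump from version 39 to 40 flags the schema change introduced below, where hardware_mode replaces enable_gpu as the primary hardware setting. A hedged sketch of how a loader might consume the bump; the mapping from enable_gpu to a default hardware_mode is an assumption, not code from this commit:

    import yaml   # assumes PyYAML is installed

    with open("config.yaml") as f:
        raw = yaml.safe_load(f)
    if raw.get("version", 0) < 40:
        # Assumed migration: derive the new key from the old boolean flag.
        raw["hardware_mode"] = "nvidia-tensorcores" if raw.get("enable_gpu") else "cpu"
        raw["version"] = 40
    print(raw["hardware_mode"])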
@@ -44,8 +44,8 @@ debug: False
auto_update: true
auto_save: true
auto_title: false
# Enables gpu usage
enable_gpu: true
# Install mode (cpu, cpu-noavx, nvidia-tensorcores, nvidia, amd-noavx, amd, apple-intel, apple-silicon)
hardware_mode: nvidia-tensorcores
# Automatically open the browser
auto_show_browser: true
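The comment above enumerates the eight legal hardware_mode values. A small validation helper one might wrap around the setting; only the value list comes from the file, the helper itself is illustrative:

    VALID_HARDWARE_MODES = (
        "cpu", "cpu-noavx", "nvidia-tensorcores", "nvidia",
        "amd-noavx", "amd", "apple-intel", "apple-silicon",
    )

    def check_hardware_mode(mode: str) -> str:
        if mode not in VALID_HARDWARE_MODES:
            raise ValueError(f"unknown hardware_mode {mode!r}")
        return mode

    check_hardware_mode("nvidia-tensorcores")   # passes silently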
@@ -1 +1 @@
Subproject commit c97a61a9e2cd5f5107c5e4b1781fbfbec7fb129e
Subproject commit e77c97f2384e97de974cba4c580511b88ae71d2a
@@ -6,65 +6,36 @@ Description: Singleton class for the LoLLMS web UI.
This class provides a singleton instance of the LoLLMS web UI, allowing access to its functionality and data across multiple endpoints.
"""

from lollms.app import LollmsApplication
from lollms.server.elf_server import LOLLMSElfServer
from lollms.main_config import LOLLMSConfig
from lollms.paths import LollmsPaths

class LoLLMSWebUI(LollmsApplication):
    __instance = None

    @staticmethod
    def build_instance(
        config: LOLLMSConfig,
        lollms_paths: LollmsPaths,
        load_binding=True,
        load_model=True,
        try_select_binding=False,
        try_select_model=False,
        callback=None,
        socketio = None
    ):
        if LoLLMSWebUI.__instance is None:
            LoLLMSWebUI(
                config,
                lollms_paths,
                load_binding=load_binding,
                load_model=load_model,
                try_select_binding=try_select_binding,
                try_select_model=try_select_model,
                callback=callback,
                socketio=socketio
            )
        return LoLLMSWebUI.__instance

    @staticmethod
    def get_instance():
        return LoLLMSWebUI.__instance

class LOLLMSWebUI(LOLLMSElfServer):
    def __init__(
        self,
        config: LOLLMSConfig,
        lollms_paths: LollmsPaths,
        load_binding=True,
        load_model=True,
        load_voice_service=True,
        load_sd_service=True,
        try_select_binding=False,
        try_select_model=False,
        callback=None,
        socketio=None
    ) -> None:
        super().__init__(
            "LoLLMSWebUI",
            config,
            lollms_paths,
            load_binding=load_binding,
            load_model=load_model,
            load_sd_service=load_sd_service,
            load_voice_service=load_voice_service,
            try_select_binding=try_select_binding,
            try_select_model=try_select_model,
            callback=callback,
            socketio=socketio
        )
        if LoLLMSWebUI.__instance is not None:
            raise Exception("This class is a singleton!")
        else:
            LoLLMSWebUI.__instance = self
        self.app_name = "LOLLMSWebUI"

    # Other methods and properties of the LoLLMSWebUI singleton class
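The replaced class implements a classic lazy singleton: build_instance constructs on first call, get_instance returns the cached object, and the constructor refuses a second instantiation. A minimal generic sketch of that pattern, detached from the lollms base classes:

    class WebUI:
        __instance = None

        def __init__(self, name: str):
            if WebUI.__instance is not None:
                raise Exception("This class is a singleton!")
            WebUI.__instance = self
            self.name = name

        @staticmethod
        def build_instance(name: str) -> "WebUI":
            # Construct lazily on the first call; later calls reuse the object.
            if WebUI.__instance is None:
                WebUI(name)
            return WebUI.__instance

        @staticmethod
        def get_instance() -> "WebUI":
            return WebUI.__instance

    a = WebUI.build_instance("LOLLMSWebUI")
    b = WebUI.get_instance()
    assert a is b   # both names refer to the single instance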
@@ -11,7 +11,7 @@ from fastapi.staticfiles import StaticFiles
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths
from lollms.main_config import LOLLMSConfig
from lollms_webui import LoLLMSWebUI
from lollms_webui import LOLLMSWebUI
from pathlib import Path
from ascii_colors import ASCIIColors
import socketio
@@ -43,7 +43,9 @@ if __name__ == "__main__":
    if args.port:
        config.port=args.port

    LoLLMSWebUI.build_instance(config=config, lollms_paths=lollms_paths, socketio=sio)
    from lollms.server.endpoints.lollms_infos import *
    LOLLMSWebUI.build_instance(config=config, lollms_paths=lollms_paths, socketio=sio)

    # Import all endpoints
    from lollms.server.endpoints.lollms_infos import router

    uvicorn.run(app, host=config.host, port=config.port)
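The startup order matters here: the singleton is built first, then the endpoint module is imported so its router can reach the instance, then the app is handed to uvicorn. An end-to-end sketch of that order in plain FastAPI; all names are illustrative stand-ins for the lollms modules, and fastapi plus uvicorn are assumed to be installed:

    from fastapi import APIRouter, FastAPI
    import uvicorn

    app = FastAPI()
    router = APIRouter()
    STATE = {"instance": None}      # plays the role of the LOLLMSWebUI singleton

    @router.get("/infos")
    def infos():
        # Endpoints look the singleton up at request time, so it must exist first.
        return {"app_name": STATE["instance"]}

    def build_instance(name: str):
        STATE["instance"] = name

    if __name__ == "__main__":
        build_instance("LOLLMSWebUI")                   # 1. build the singleton
        app.include_router(router)                      # 2. wire the endpoint router
        uvicorn.run(app, host="localhost", port=9600)   # 3. serve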
@@ -3,6 +3,22 @@
# This script will install miniconda and git with all dependencies for this project
# This enables a user to install this project without manually installing conda and git.

echo " ___ ___ ___ ___ ___ ___ "
echo " /\__\ /\ \ /\__\ /\__\ /\__\ /\ \ "
echo " /:/ / /::\ \ /:/ / /:/ / /::| | /::\ \ "
echo " /:/ / /:/\:\ \ /:/ / /:/ / /:|:| | /:/\ \ \ "
echo " /:/ / /:/ \:\ \ /:/ / /:/ / /:/|:|__|__ _\:\~\ \ \ "
echo " /:/__/ /:/__/ \:\__\ /:/__/ /:/__/ /:/ |::::\__\ /\ \:\ \ \__\ "
echo " \:\ \ \:\ \ /:/ / \:\ \ \:\ \ \/__/~~/:/ / \:\ \:\ \/__/ "
echo " \:\ \ \:\ /:/ / \:\ \ \:\ \ /:/ / \:\ \:\__\ "
echo " \:\ \ \:\/:/ / \:\ \ \:\ \ /:/ / \:\/:/ / "
echo " \:\__\ \::/ / \:\__\ \:\__\ /:/ / \::/ / "
echo " \/__/ \/__/ \/__/ \/__/ \/__/ \/__/ "
echo "V8.5 (alpha)"
echo "-----------------"
echo "By ParisNeo"
echo "-----------------"


cd "$(dirname "$0")"
@@ -19,47 +35,12 @@ if [[ "$PWD" =~ [^#\$\%\&\(\)\*\+\] ]]; then
fi


export PACKAGES_TO_INSTALL="python=3.11 git"
read -rp "Press Enter to continue..."

clear

echo " ___ ___ ___ ___ ___ ___ "
echo " /\__\ /\ \ /\__\ /\__\ /\__\ /\ \ "
echo " /:/ / /::\ \ /:/ / /:/ / /::| | /::\ \ "
echo " /:/ / /:/\:\ \ /:/ / /:/ / /:|:| | /:/\ \ \ "
echo " /:/ / /:/ \:\ \ /:/ / /:/ / /:/|:|__|__ _\:\~\ \ \ "
echo " /:/__/ /:/__/ \:\__\ /:/__/ /:/__/ /:/ |::::\__\ /\ \:\ \ \__\ "
echo " \:\ \ \:\ \ /:/ / \:\ \ \:\ \ \/__/~~/:/ / \:\ \:\ \/__/ "
echo " \:\ \ \:\ /:/ / \:\ \ \:\ \ /:/ / \:\ \:\__\ "
echo " \:\ \ \:\/:/ / \:\ \ \:\ \ /:/ / \:\/:/ / "
echo " \:\__\ \::/ / \:\__\ \:\__\ /:/ / \::/ / "
echo " \/__/ \/__/ \/__/ \/__/ \/__/ \/__/ "
echo " By ParisNeo"

echo "Please specify if you want to use a GPU or CPU."
echo "*Note* that only NVIDIA GPUs (cuda) or AMD GPUs (rocm) are supported."
echo "A) Enable Cuda (for nvidia GPUs)"
echo "B) Enable ROCm (for AMD GPUs)"
echo "C) Run CPU mode"
echo
read -rp "Input> " gpuchoice
gpuchoice="${gpuchoice:0:1}"

if [[ "${gpuchoice^^}" == "A" ]]; then
    PACKAGES_TO_INSTALL="python=3.10 cuda-toolkit ninja git gcc"
    CHANNEL="-c nvidia/label/cuda-12.1.1 -c nvidia -c conda-forge"
elif [[ "${gpuchoice^^}" == "B" ]]; then
    PACKAGES_TO_INSTALL="python=3.10 rocm-comgr rocm-smi ninja git gcc"
    CHANNEL=" -c conda-forge"
elif [[ "${gpuchoice^^}" == "C" ]]; then
    PACKAGES_TO_INSTALL="python=3.10 ninja git gcc"
    CHANNEL="-c conda-forge"
else
    echo "Invalid choice. Exiting..."
    exit 1
fi

# Better isolation for virtual environment
unset CONDA_SHLVL
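This hunk removes the interactive A/B/C GPU menu from the shell installer; hardware selection now happens in the Python web installer (scripts/python/lollms_installer), whose changes appear later in this diff. A hedged Python sketch of the selection logic that replaces the menu; the package lists mirror the removed bash branches and are illustrative, not authoritative:

    # Map the new hardware_mode values onto conda package sets, as the
    # removed bash menu used to do per choice.
    def conda_packages_for(mode: str) -> str:
        if mode in ("nvidia", "nvidia-tensorcores"):
            return "python=3.10 cuda-toolkit ninja git gcc"
        if mode in ("amd", "amd-noavx"):
            return "python=3.10 rocm-comgr rocm-smi ninja git gcc"
        return "python=3.10 ninja git gcc"   # cpu, cpu-noavx, apple-*

    print(conda_packages_for("nvidia-tensorcores"))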
@@ -115,33 +96,28 @@ export CUDA_PATH="$INSTALL_ENV_DIR"
if [ -d "lollms-webui" ]; then
  cd lollms-webui || exit 1
  git pull
  git submodule update --init --recursive
  cd
  cd lollms-core
  pip install -e .
  cd ..
  cd utilities/safe_store
  pip install -e .
  cd ../..

else
  git clone "$REPO_URL"
  git clone --depth 1 --recurse-submodules "$REPO_URL"
  git submodule update --init --recursive
  cd lollms-webui/lollms_core
  pip install -e .
  cd ..
  cd utilities/safe_store
  pip install -e .
  cd ../..

  cd lollms-webui || exit 1
fi

# Initialize all submodules and set them to main branch
echo "Initializing submodules"
git submodule update --init
cd zoos/bindings_zoo
git checkout main
cd ../personalities_zoo
git checkout main
cd ../extensions_zoo
git checkout main
cd ../models_zoo
git checkout main

cd ../..

cd lollms_core
git checkout main

cd ../utilities/safe_store
git checkout main

cd ../..

# Loop through each "git+" requirement and uninstall it (workaround for inconsistent git package updating)
while IFS= read -r requirement; do
  if echo "$requirement" | grep -q "git+"; then
@@ -152,8 +128,6 @@ done < requirements.txt

# Install the pip requirements
python -m pip install -r requirements.txt --upgrade
python -m pip install -e lollms_core --upgrade
python -m pip install -e utilities/safe_store --upgrade


if [[ -e "../linux_run.sh" ]]; then
@@ -180,12 +154,10 @@ else
    cp scripts/linux/linux_update_models.sh ../
fi

if [[ "${gpuchoice^^}" == "C" ]]; then
    echo "This is a .no_gpu file." > .no_gpu
    echo "You have chosen to use only CPU on this system."
else
    echo "You have chosen to use GPU on this system."
fi

cd scripts/python/lollms_installer
python main.py
cd ..

PrintBigMessage() {
    echo
@@ -3,6 +3,23 @@
# This script will install Miniconda and git with all dependencies for this project.
# This enables a user to install this project without manually installing Conda and git.

echo " ___ ___ ___ ___ ___ ___ "
echo " /\__\ /\ \ /\__\ /\__\ /\__\ /\ \ "
echo " /:/ / /::\ \ /:/ / /:/ / /::| | /::\ \ "
echo " /:/ / /:/\:\ \ /:/ / /:/ / /:|:| | /:/\ \ \ "
echo " /:/ / /:/ \:\ \ /:/ / /:/ / /:/|:|__|__ _\:\~\ \ \ "
echo " /:/__/ /:/__/ \:\__\ /:/__/ /:/__/ /:/ |::::\__\ /\ \:\ \ \__\ "
echo " \:\ \ \:\ \ /:/ / \:\ \ \:\ \ \/__/~~/:/ / \:\ \:\ \/__/ "
echo " \:\ \ \:\ /:/ / \:\ \ \:\ \ /:/ / \:\ \:\__\ "
echo " \:\ \ \:\/:/ / \:\ \ \:\ \ /:/ / \:\/:/ / "
echo " \:\__\ \::/ / \:\__\ \:\__\ /:/ / \::/ / "
echo " \/__/ \/__/ \/__/ \/__/ \/__/ \/__/ "
echo "V8.5 (alpha)"
echo "-----------------"
echo "By ParisNeo"
echo "-----------------"


cd "$(dirname "$0")"

if [[ "$PWD" == *" "* ]]; then
@@ -21,37 +38,8 @@ read -rp "Press Enter to continue..."

clear

echo " ___ ___ ___ ___ ___ ___ "
echo " /\__\ /\ \ /\__\ /\__\ /\__\ /\ \ "
echo " /:/ / /::\ \ /:/ / /:/ / /::| | /::\ \ "
echo " /:/ / /:/\:\ \ /:/ / /:/ / /:|:| | /:/\ \ \ "
echo " /:/ / /:/ \:\ \ /:/ / /:/ / /:/|:|__|__ _\:\~\ \ \ "
echo " /:/__/ /:/__/ \:\__\ /:/__/ /:/__/ /:/ |::::\__\ /\ \:\ \ \__\ "
echo " \:\ \ \:\ \ /:/ / \:\ \ \:\ \ \/__/~~/:/ / \:\ \:\ \/__/ "
echo " \:\ \ \:\ /:/ / \:\ \ \:\ \ /:/ / \:\ \:\__\ "
echo " \:\ \ \:\/:/ / \:\ \ \:\ \ /:/ / \:\/:/ / "
echo " \:\__\ \::/ / \:\__\ \:\__\ /:/ / \::/ / "
echo " \/__/ \/__/ \/__/ \/__/ \/__/ \/__/ "
echo " By ParisNeo"

echo "Please specify if you want to use a GPU or CPU."
echo "*Note* that only NVIDIA GPUs (cuda) or AMD GPUs (rocm) are supported."
echo "A) Enable GPU"
echo "B) Run CPU mode"
echo
read -rp "Input> " gpuchoice
gpuchoice="${gpuchoice:0:1}"
uppercase_gpuchoice=$(echo "$gpuchoice" | tr '[:lower:]' '[:upper:]')
if [[ "$uppercase_gpuchoice" == "A" ]]; then
    PACKAGES_TO_INSTALL="python=3.10 cuda-toolkit ninja git"
    CHANNEL="-c nvidia/label/cuda-12.1.1 -c nvidia -c conda-forge"
elif [[ "$uppercase_gpuchoice" == "B" ]]; then
    PACKAGES_TO_INSTALL="python=3.10 ninja git"
    CHANNEL="-c conda-forge"
else
    echo "Invalid choice. Exiting..."
    exit 1
fi
export PACKAGES_TO_INSTALL="python=3.11 git"

echo "Installing gcc..."
brew install gcc
@@ -67,7 +55,14 @@ export TMP="$PWD/installer_files/temp"
MINICONDA_DIR="$PWD/installer_files/miniconda3"
INSTALL_ENV_DIR="$PWD/installer_files/lollms_env"
ENV_NAME="lollms"
MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh"

arch=$(uname -m)
if [ "$arch" == "arm64" ]; then
    MINICONDA_DOWNLOAD_URL="https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-MacOSX-arm64.sh"
else
    MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh"
fi

REPO_URL="https://github.com/ParisNeo/lollms-webui.git"

if [ ! -f "$MINICONDA_DIR/Scripts/conda" ]; then
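The macOS installer now branches on uname -m so Apple Silicon machines get a Miniforge build instead of the x86_64 Miniconda. The same detection in Python for comparison; platform.machine() is the standard equivalent of uname -m, and the URLs are copied from the script above:

    import platform

    if platform.machine() == "arm64":
        url = "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-MacOSX-arm64.sh"
    else:
        url = "https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh"
    print(url)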
@@ -108,34 +103,28 @@ export CUDA_PATH="$INSTALL_ENV_DIR"
if [ -d "lollms-webui" ]; then
  cd lollms-webui || exit 1
  git pull
  git submodule update --init --recursive
  cd
  cd lollms-core
  pip install -e .
  cd ..
  cd utilities/safe_store
  pip install -e .
  cd ../..

else
  git clone "$REPO_URL"
  git clone --depth 1 --recurse-submodules "$REPO_URL"
  git submodule update --init --recursive
  cd lollms-webui/lollms_core
  pip install -e .
  cd ..
  cd utilities/safe_store
  pip install -e .
  cd ../..

  cd lollms-webui || exit 1
fi

# Initialize all submodules and set them to main branch
echo "Initializing submodules"
git submodule update --init
cd zoos/bindings_zoo
git checkout main
cd ../personalities_zoo
git checkout main
cd ../extensions_zoo
git checkout main
cd ../models_zoo
git checkout main

cd ../..

cd lollms_core
git checkout main

cd ../utilities/safe_store
git checkout main

cd ../..


# Loop through each "git+" requirement and uninstall it (workaround for inconsistent git package updating)
while IFS= read -r requirement; do
  if echo "$requirement" | grep -q "git+"; then
@@ -147,10 +136,6 @@ done < requirements.txt
# Install the pip requirements
python -m pip install -r requirements.txt --upgrade

python -m pip install -e lollms_core --upgrade

python -m pip install -e utilities/safe_store --upgrade

if [[ -e "../macos_run.sh" ]]; then
    echo "Macos run found"
else
@@ -163,13 +148,17 @@ else
    cp scripts/macos/macos_update.sh ../
fi

uppercase_gpuchoice=$(echo "$gpuchoice" | tr '[:lower:]' '[:upper:]')
if [[ "$uppercase_gpuchoice" == "B" ]]; then
    echo "This is a .no_gpu file." > .no_gpu
if [[ -e "../macos_conda_session.sh" ]]; then
    echo "Macos conda session found"
else
    echo "GPU is enabled, no .no_gpu file will be created."
    cp scripts/macos/macos_conda_session.sh ../
fi


cd scripts/python/lollms_installer
python main.py
cd ..

PrintBigMessage() {
    echo
    echo "*******************************************************************"
@@ -4,24 +4,40 @@
<h1 class="text-4xl font-bold mb-4">LOLLMS installation tool</h1>
<p class="text-left">
    Welcome to the installer of lollms. Here you can select your install profile.<br>
    Let's start by selecting the install mode.<br><br>
    Let's start by selecting the hardware.<br><br>
</p>
<div class="flex flex-col gap-2">
    <label class="flex items-center">
        <input type="radio" value="cuda" v-model="selectedOption" class="mr-2">
        Use NVIDIA GPU with CUDA
    </label>
    <label class="flex items-center">
        <input type="radio" value="rocm" v-model="selectedOption" class="mr-2">
        Use AMD GPU with ROCm
        <input type="radio" value="cpu-noavx" v-model="selectedOption" class="mr-2">
        Use CPU without AVX (for old CPUs)
    </label>
    <label class="flex items-center">
        <input type="radio" value="cpu" v-model="selectedOption" class="mr-2">
        Use CPU (no GPU)
        Use CPU with AVX support (new CPUs)
    </label>
    <label class="flex items-center">
        <input type="radio" value="metal" v-model="selectedOption" class="mr-2">
        Use Metal (for Apple Silicon like M1 and M2)
        <input type="radio" value="nvidia" v-model="selectedOption" class="mr-2">
        Use NVIDIA GPU without tensor cores (for old GPUs)
    </label>
    <label class="flex items-center">
        <input type="radio" value="nvidia-tensorcores" v-model="selectedOption" class="mr-2">
        Use NVIDIA GPU with tensor cores (new GPUs)
    </label>
    <label class="flex items-center">
        <input type="radio" value="amd-noavx" v-model="selectedOption" class="mr-2">
        Use AMD GPU with no AVX
    </label>
    <label class="flex items-center">
        <input type="radio" value="amd" v-model="selectedOption" class="mr-2">
        Use AMD GPU
    </label>
    <label class="flex items-center">
        <input type="radio" value="apple-intel" v-model="selectedOption" class="mr-2">
        Apple with Intel CPU
    </label>
    <label class="flex items-center">
        <input type="radio" value="apple-silicon" v-model="selectedOption" class="mr-2">
        Apple Silicon (M1, M2, M3)
    </label>
</div>
<button @click="install" class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 mt-4 rounded">
@@ -61,7 +61,7 @@ lollms_app = LollmsApplication(
    load_binding=False,
    load_model=False,
    load_voice_service=False,
    load
    load_sd_service=False,
    socketio=sio)

# Serve the index.html file for all routes
@@ -91,39 +91,59 @@ def start_installing(data: InstallProperties):
    Returns:
    - A dictionary with a "message" key indicating the success of the installation.
    """
    # Install mode (cpu, cpu-noavx, nvidia-tensorcores, nvidia, amd-noavx, amd, apple-intel, apple-silicon)
    if data.mode=="cpu":
        config.enable_gpu=False
        config.hardware_mode="cpu"
        try:
            lollms_app.ShowBlockingMessage("Installing pytorch for CPU")
            reinstall_pytorch_with_cpu()
            lollms_app.ShowBlockingMessage("Setting hardware configuration to CPU")
            config.save_config()
            lollms_app.HideBlockingMessage()
        except:
            lollms_app.HideBlockingMessage()

    elif data.mode=="cuda":
        config.enable_gpu=True
    if data.mode=="cpu-noavx":
        config.hardware_mode="cpu-noavx"
        try:
            lollms_app.ShowBlockingMessage("Setting hardware configuration to CPU with no AVX support")
            config.save_config()
            lollms_app.HideBlockingMessage()
        except:
            lollms_app.HideBlockingMessage()
    elif data.mode=="nvidia":
        config.hardware_mode="nvidia"
        try:
            lollms_app.ShowBlockingMessage("Installing pytorch for nVidia GPU (cuda)")
            reinstall_pytorch_with_cuda()
            config.save_config()
            lollms_app.HideBlockingMessage()
        except:
            lollms_app.HideBlockingMessage()
    elif data.mode=="rocm":
        config.enable_gpu=True
    elif data.mode=="nvidia-tensorcores":
        config.hardware_mode="nvidia-tensorcores"
        try:
            lollms_app.ShowBlockingMessage("Installing pytorch for nVidia GPU (cuda)")
            config.save_config()
            lollms_app.HideBlockingMessage()
        except:
            lollms_app.HideBlockingMessage()
    elif data.mode=="amd":
        config.hardware_mode="amd"
        try:
            lollms_app.ShowBlockingMessage("Installing pytorch for AMD GPU (rocm)")
            reinstall_pytorch_with_rocm()
            config.save_config()
            lollms_app.HideBlockingMessage()
        except:
            lollms_app.HideBlockingMessage()
    elif data.mode=="metal":
    elif data.mode=="apple-silicon":
        config.hardware_mode="apple-silicon"
        try:
            lollms_app.ShowBlockingMessage("Installing pytorch for Apple Silicon (Metal)")
            config.save_config()
            lollms_app.HideBlockingMessage()
        except:
            lollms_app.HideBlockingMessage()
    elif data.mode=="apple-intel":
        config.hardware_mode="apple-intel"
        try:
            lollms_app.ShowBlockingMessage("Installing pytorch for Apple Intel (CPU)")
            config.enable_gpu=False
            reinstall_pytorch_with_cpu()
            config.save_config()
            lollms_app.HideBlockingMessage()
        except:
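Each branch of the if/elif ladder repeats the same ShowBlockingMessage / install / save_config / HideBlockingMessage scaffolding. A possible table-driven refactor, sketched with print stand-ins; this is a suggestion, not what the commit ships, and the reinstall_* names mirror the calls above:

    # Print stand-ins for the real installers.
    def reinstall_pytorch_with_cpu():  print("installing torch (cpu)")
    def reinstall_pytorch_with_cuda(): print("installing torch (cuda)")
    def reinstall_pytorch_with_rocm(): print("installing torch (rocm)")

    MODE_ACTIONS = {
        "cpu":                ("Installing pytorch for CPU", reinstall_pytorch_with_cpu),
        "cpu-noavx":          ("Setting hardware configuration to CPU with no AVX support", None),
        "nvidia":             ("Installing pytorch for nVidia GPU (cuda)", reinstall_pytorch_with_cuda),
        # The hunk above shows no reinstall call for this mode.
        "nvidia-tensorcores": ("Installing pytorch for nVidia GPU (cuda)", None),
        "amd":                ("Installing pytorch for AMD GPU (rocm)", reinstall_pytorch_with_rocm),
        "apple-silicon":      ("Installing pytorch for Apple Silicon (Metal)", None),
        "apple-intel":        ("Installing pytorch for Apple Intel (CPU)", reinstall_pytorch_with_cpu),
    }

    def start_installing(mode: str):
        message, action = MODE_ACTIONS[mode]
        print(message)                    # stands in for lollms_app.ShowBlockingMessage
        try:
            if action is not None:
                action()
            # config.hardware_mode = mode; config.save_config() would go here
        finally:
            print("done")                 # stands in for lollms_app.HideBlockingMessage

    start_installing("nvidia-tensorcores")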
@@ -152,7 +152,6 @@ echo Install failed
goto endend
:end

cd
cd scripts\python\lollms_installer
call python main.py
cd ..
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long

4
web/dist/index.html
vendored
@@ -6,8 +6,8 @@

    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>LoLLMS WebUI - Welcome</title>
    <script type="module" crossorigin src="/assets/index-503d2057.js"></script>
    <link rel="stylesheet" href="/assets/index-63438daa.css">
    <script type="module" crossorigin src="/assets/index-33cdc340.js"></script>
    <link rel="stylesheet" href="/assets/index-a8faae1b.css">
</head>
<body>
    <div id="app"></div>
@@ -228,6 +228,32 @@
<div class="flex flex-col mb-2 px-3 pb-2">
    <Card title="General" :is_subcard="true" class="pb-2 m-2">
        <table class="expand-to-fit bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500">

            <tr>
                <td style="min-width: 200px;">
                    <label for="hardware_mode" class="text-sm font-bold" style="margin-right: 1rem;">Hardware mode:</label>
                </td>
                <td class="text-center items-center">
                    <div class="flex flex-row">
                        <select
                            id="hardware_mode"
                            required
                            v-model="configFile.hardware_mode"
                            @change="settingsChanged=true"
                            class="m-2 h-50 w-50 py-1 border border-gray-300 rounded dark:bg-gray-600"
                        >
                            <option value="cpu">CPU</option>
                            <option value="cpu-noavx">CPU (No AVX)</option>
                            <option value="nvidia-tensorcores">NVIDIA (Tensor Cores)</option>
                            <option value="nvidia">NVIDIA</option>
                            <option value="amd-noavx">AMD (No AVX)</option>
                            <option value="amd">AMD</option>
                            <option value="apple-intel">Apple Intel</option>
                            <option value="apple-silicon">Apple Silicon</option>
                        </select>
                    </div>
                </td>
            </tr>
            <tr>
                <td style="min-width: 200px;">
                    <label for="db_path" class="text-sm font-bold" style="margin-right: 1rem;">Host:</label>
@@ -309,26 +335,6 @@
                </td>
            </tr>

            <tr>
                <td style="min-width: 200px;">
                    <label for="enable_gpu" class="text-sm font-bold" style="margin-right: 1rem;">Enable GPU:</label>
                </td>
                <td class="text-center items-center">
                    <div class="flex flex-row">
                        <input
                            type="checkbox"
                            id="enable_gpu"
                            required
                            v-model="configFile.enable_gpu"
                            @change="settingsChanged=true"
                            class="m-2 h-50 w-50 py-1 border border-gray-300 rounded dark:bg-gray-600 "
                        >
                        <button v-if="!configFile.enable_gpu" @click.prevent="upgrade2GPU" class="w-100 text-center rounded m-2 bg-blue-300 hover:bg-blue-200 text-l hover:text-primary p-2 m-2 text-left flex flex-row ">
                            Upgrade from CPU to GPU
                        </button>
                    </div>
                </td>
            </tr>
            <tr>

                <td style="min-width: 200px;">
@@ -2969,27 +2975,6 @@ export default {
                    return { 'status': false }
                });
        },
        upgrade2GPU(){
            this.isLoading = true
            try{
                axios.get('/upgrade_to_gpu').then(res => {
                    this.isLoading = false
                    if (res) {
                        if(res.status){
                            this.$store.state.toast.showToast("Upgraded to GPU", 4, true)
                            this.configFile.enable_gpu=true
                        }
                        else{
                            this.$store.state.toast.showToast("Could not upgrade to GPU. Endpoint error: " + res.error, 4, false)
                        }
                    }
                })
            }
            catch (error) {
                this.isLoading = false
                this.$store.state.toast.showToast("Could not open binding settings. Endpoint error: " + error.message, 4, false)
            }
        },
        onSettingsExtension(extensionEntry){
            try {
                this.isLoading = true
@@ -4142,13 +4127,13 @@ export default {
            },
        },

        enable_gpu:{
        hardware_mode:{
            get() {
                return this.$store.state.config.enable_gpu;
                return this.$store.state.config.hardware_mode;
            },
            set(value) {
                // You should not set the value directly here; use the updateSetting method instead
                this.$store.state.config.enable_gpu = value
                this.$store.state.config.hardware_mode = value
            },

        },
@@ -1 +1 @@
Subproject commit 38df1f31eba840f1219621dcd9f62f3cd5d9ffd7
Subproject commit b704a1424e91e1b33a21c7c517a742144dea261e