mirror of https://github.com/ParisNeo/lollms-webui.git
synced 2024-12-18 20:17:50 +00:00

upgraded model installation

This commit is contained in:
parent 8b6bfacc5d
commit 72203f40e5

@@ -123,7 +123,7 @@ if [ ! -f "models/gpt4all-lora-quantized-ggml.bin" ]; then
    echo ""
    read -p "The default model file (gpt4all-lora-quantized-ggml.bin) does not exist. Do you want to download it? Press Y to download it with a browser (faster)." yn
    case $yn in
-        [Yy]* ) open "https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin"
+        [Yy]* ) open "https://huggingface.co/ParisNeo/GPT4All/resolve/main/gpt4all-lora-quantized-ggml.bin"
            echo "Link has been opened with the default web browser, make sure to save it into the models folder before continuing. Press any key to continue..."
            read -n 1 -s;;
        * ) echo "Skipping download of model file...";;
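
A side note on the `open` call above: `open` is the macOS launcher, and on most Linux desktops the equivalent is `xdg-open`. A minimal portability guard, using a hypothetical helper name `open_url` that is not part of this commit, could look like this:

# Hypothetical helper: pick a browser launcher for the current platform.
open_url() {
    local url="$1"
    if command -v open >/dev/null 2>&1; then
        open "$url"          # macOS
    elif command -v xdg-open >/dev/null 2>&1; then
        xdg-open "$url"      # most Linux desktops
    else
        echo "Please open $url manually."
    fi
}
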
@@ -132,97 +132,13 @@ else
    echo ""
    read -p "The default model file (gpt4all-lora-quantized-ggml.bin) already exists. Do you want to replace it? Press Y to download it with a browser (faster)." yn
    case $yn in
-        [Yy]* ) open "https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin"
+        [Yy]* ) open "https://huggingface.co/ParisNeo/GPT4All/resolve/main/gpt4all-lora-quantized-ggml.bin"
            echo "Link has been opened with the default web browser, make sure to save it into the models folder before continuing. Press any key to continue..."
            read -n 1 -s;;
        * ) echo "Skipping download of model file...";;
    esac
fi

echo ""
echo "Downloading latest model..."
curl -o "models/gpt4all-lora-quantized-ggml.bin" "https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin"
if [ $? -ne 0 ]; then
    echo "Failed to download model. Please check your internet connection."
    read -p "Do you want to try downloading again? Press Y to download." yn
    case $yn in
        [Yy]* ) echo "Downloading latest model..."
            curl -o "models/gpt4all-lora-quantized-ggml.bin" "https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin";;
        * ) echo "Skipping download of model file...";;
    esac
else
    echo "Model successfully downloaded."
fi

echo ""
echo "In order to make a model work, it needs to go through the LLaMA tokenizer, this will fix errors with the model in run.bat. Do you want to convert the model?"
read -p "Press Y to convert or N to skip: " yn
case $yn in
    [Yy]* )
        echo ""
        echo "Select a model to convert:"
        count=0
        for f in models/*; do
            count=$((count+1))
            file[$count]=$f
            echo "[$count] $f"
        done

        # Prompt user to choose a model to convert
        read -p "Enter the number of the model you want to convert: " modelNumber

        if [ -z "${file[modelNumber]}" ]; then
            echo ""
            echo "Invalid option. Restarting..."
            exit 1
        fi

        modelPath="${file[modelNumber]}"

        echo ""
        echo "You selected $modelPath"

        # Ask user if they want to convert the model
        echo ""
        read -p "Do you want to convert the selected model to the new format? (Y/N)" choice

        if [ "$choice" == "N" ]; then
            echo ""
            echo "Model conversion cancelled. Skipping..."
            exit 0
        fi

        echo "The code has been converted successfully."
esac
# Convert the model
echo ""
echo "Converting the model to the new format..."
if [ ! -d "tmp/llama.cpp" ]; then
    git clone https://github.com/ggerganov/llama.cpp.git tmp/llama.cpp
    cd tmp/llama.cpp
    git checkout 0f07cacb05f49704d35a39aa27cfd4b419eb6f8d
    cd ../..
fi
mv -f "${modelPath}" "${modelPath}.original"
python tmp/llama.cpp/migrate-ggml-2023-03-30-pr613.py "${modelPath}.original" "${modelPath}"
if [ $? -ne 0 ]; then
    echo ""
    echo "Error during model conversion. Restarting..."
    mv -f "${modelPath}.original" "${modelPath}"
    goto CONVERT_RESTART
else
    echo ""
    echo "The model file (${modelPath}) has been converted to the new format."
    goto END
fi

:CANCEL_CONVERSION
echo ""
echo "Conversion cancelled. Skipping..."
goto END

:END
echo ""
echo "Cleaning tmp folder"
rm -rf "./tmp"
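
The `goto` calls and `:LABEL` lines in the block above are Windows batch constructs with no meaning in bash; they appear to be leftovers from porting install.bat, which is part of why this commit removes the block. A minimal bash-native sketch of the same convert-with-rollback step (same pinned llama.cpp commit and migrate script as above; the `convert_model` function name is an assumption) might look like:

# Sketch: convert a ggml model, restoring the original file if conversion fails.
convert_model() {
    local modelPath="$1"
    if [ ! -d "tmp/llama.cpp" ]; then
        git clone https://github.com/ggerganov/llama.cpp.git tmp/llama.cpp
        git -C tmp/llama.cpp checkout 0f07cacb05f49704d35a39aa27cfd4b419eb6f8d
    fi
    # Keep the original file so a failed conversion can be rolled back.
    mv -f "$modelPath" "$modelPath.original"
    if python tmp/llama.cpp/migrate-ggml-2023-03-30-pr613.py "$modelPath.original" "$modelPath"; then
        echo "The model file ($modelPath) has been converted to the new format."
    else
        echo "Error during model conversion. Restoring the original file..."
        mv -f "$modelPath.original" "$modelPath"
        return 1
    fi
}
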
@@ -61,88 +61,19 @@ fi

echo ""
echo "Downloading latest model..."
-curl -o "models/gpt4all-lora-quantized-ggml.bin" "https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin"
+curl -o "models/gpt4all-lora-quantized-ggml.bin" "https://huggingface.co/ParisNeo/GPT4All/resolve/main/gpt4all-lora-quantized-ggml.bin"
if [ $? -ne 0 ]; then
    echo "Failed to download model. Please check your internet connection."
    read -p "Do you want to try downloading again? Press Y to download." yn
    case $yn in
        [Yy]* ) echo "Downloading latest model..."
-            curl -o "models/gpt4all-lora-quantized-ggml.bin" "https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin";;
+            curl -o "models/gpt4all-lora-quantized-ggml.bin" "https://huggingface.co/ParisNeo/GPT4All/resolve/main/gpt4all-lora-quantized-ggml.bin";;
        * ) echo "Skipping download of model file...";;
    esac
else
    echo "Model successfully downloaded."
fi

echo ""
echo "In order to make a model work, it needs to go through the LLaMA tokenizer, this will fix errors with the model in run.bat. Do you want to convert the model?"
read -p "Press Y to convert or N to skip: " yn
case $yn in
    [Yy]* )
        echo ""
        echo "Select a model to convert:"
        count=0
        for f in models/*; do
            count=$((count+1))
            file[$count]=$f
            echo "[$count] $f"
        done

        # Prompt user to choose a model to convert
        read -p "Enter the number of the model you want to convert: " modelNumber

        if [ -z "${file[modelNumber]}" ]; then
            echo ""
            echo "Invalid option. Restarting..."
            exit 1
        fi

        modelPath="${file[modelNumber]}"

        echo ""
        echo "You selected $modelPath"

        # Ask user if they want to convert the model
        echo ""
        read -p "Do you want to convert the selected model to the new format? (Y/N)" choice

        if [ "$choice" == "N" ]; then
            echo ""
            echo "Model conversion cancelled. Skipping..."
            exit 0
        fi

        echo "The code has been converted successfully."
esac
# Convert the model
echo ""
echo "Converting the model to the new format..."
if [ ! -d "tmp/llama.cpp" ]; then
    git clone https://github.com/ggerganov/llama.cpp.git tmp/llama.cpp
    cd tmp/llama.cpp
    git checkout 0f07cacb05f49704d35a39aa27cfd4b419eb6f8d
    cd ../..
fi
mv -f "${modelPath}" "${modelPath}.original"
python tmp/llama.cpp/migrate-ggml-2023-03-30-pr613.py "${modelPath}.original" "${modelPath}"
if [ $? -ne 0 ]; then
    echo ""
    echo "Error during model conversion. Restarting..."
    mv -f "${modelPath}.original" "${modelPath}"
    goto CONVERT_RESTART
else
    echo ""
    echo "The model file (${modelPath}) has been converted to the new format."
    goto END
fi

:CANCEL_CONVERSION
echo ""
echo "Conversion cancelled. Skipping..."
goto END

:END
echo ""
echo "Cleaning tmp folder"
rm -rf "./tmp"
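
One caveat with the new download URL: Hugging Face `resolve/...` links answer with an HTTP redirect, and plain `curl -o` does not follow redirects, so the saved file can end up being the redirect response rather than the model. A more defensive variant (the `-L`, `--fail`, and `--retry` flags are standard curl options, not something this commit adds) might be:

# Follow redirects (-L), treat HTTP errors as failures (--fail),
# and retry transient network errors a few times (--retry 3).
curl -L --fail --retry 3 \
    -o "models/gpt4all-lora-quantized-ggml.bin" \
    "https://huggingface.co/ParisNeo/GPT4All/resolve/main/gpt4all-lora-quantized-ggml.bin"
if [ $? -ne 0 ]; then
    echo "Download failed; check your internet connection and try again."
fi
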
install.bat (74 changed lines)

@@ -222,7 +222,7 @@ if not exist models/gpt4all-lora-quantized-ggml.bin (
)

:DOWNLOAD_WITH_BROWSER
-start https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin
+start https://huggingface.co/ParisNeo/GPT4All/resolve/main/gpt4all-lora-quantized-ggml.bin
echo Link has been opened with the default web browser, make sure to save it into the models folder before continuing. Press any key to continue...
pause
goto :CONTINUE

@@ -230,7 +230,7 @@ goto :CONTINUE
:MODEL_DOWNLOAD
echo.
echo Downloading latest model...
-powershell -Command "Invoke-WebRequest -Uri 'https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin' -OutFile 'models/gpt4all-lora-quantized-ggml.bin'"
+powershell -Command "Invoke-WebRequest -Uri 'https://huggingface.co/ParisNeo/GPT4All/resolve/main/gpt4all-lora-quantized-ggml.bin' -OutFile 'models/gpt4all-lora-quantized-ggml.bin'"
if errorlevel 1 (
    echo Failed to download model. Please check your internet connection.
    choice /C YN /M "Do you want to try downloading again?"

@@ -248,76 +248,6 @@ goto :CONTINUE

:CONTINUE

REM This code lists all files in the ./models folder and asks the user to choose one to convert.
REM If the user agrees, it converts using Python. If not, it skips. On conversion failure, it reverts to the original model.
:CONVERT_RESTART
echo.
choice /C YN /M "In order to make a model work, it needs to go through the LLaMA tokenizer, this will fix errors with the model in run.bat. Do you want to convert the model?"
if errorlevel 2 goto CANCEL_CONVERSION
if errorlevel 1 goto CONVERT_START

:CONVERT_START
REM List all files in the models folder
setlocal EnableDelayedExpansion
set count=0
for %%a in (models\*.*) do (
    set /A count+=1
    set "file[!count!]=%%a"
    echo [!count!] %%a
)

REM Prompt user to choose a model to convert
set /P modelNumber="Enter the number of the model you want to convert: "

if not defined file[%modelNumber%] (
    echo.
    echo Invalid option. Restarting...
    goto CONVERT_RESTART
)

set "modelPath=!file[%modelNumber%]!"

echo.
echo You selected !modelPath!
REM Ask user if they want to convert the model
echo.
choice /C YN /M "Do you want to convert the selected model to the new format?"
if errorlevel 2 (
    echo.
    echo Model conversion cancelled. Skipping...
    goto END
)
REM Convert the model
echo.
echo Converting the model to the new format...
if not exist tmp\llama.cpp git clone https://github.com/ggerganov/llama.cpp.git tmp\llama.cpp
cd tmp\llama.cpp
git checkout 0f07cacb05f49704d35a39aa27cfd4b419eb6f8d
cd ..\..
move /y "!modelPath!" "!modelPath!.original"
python tmp\llama.cpp\migrate-ggml-2023-03-30-pr613.py "!modelPath!.original" "!modelPath!"
if %errorlevel% neq 0 (
    goto ERROR_CONVERSION
) else (
    goto SUCCESSFUL_CONVERSION
)

:ERROR_CONVERSION
echo.
echo Error during model conversion. Restarting...
move /y "!modelPath!.original" "!modelPath!"
goto CONVERT_RESTART

:SUCCESSFUL_CONVERSION
echo.
echo The model file (!modelPath!) has been converted to the new format.
goto END

:CANCEL_CONVERSION
echo.
echo Conversion cancelled. Skipping...
goto END

:END

echo Cleaning tmp folder
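
For what it's worth, the numbered-menu pattern the batch code above builds by hand (a counter plus a `file[!count!]` array) is built into bash as the `select` statement, which is relevant to the shell scripts in this same commit. A rough equivalent, assuming the same `models/` layout, could read:

# Let the user pick a model file from models/ by number; bash's select
# prints the numbered menu and reprompts until a valid choice is made.
select modelPath in models/*; do
    if [ -n "$modelPath" ]; then
        echo "You selected $modelPath"
        break
    fi
    echo "Invalid option."
done
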
install.sh (75 changed lines)

@@ -100,8 +100,6 @@ echo "OK"

# Install the required packages
echo "Installing requirements..."
-export DS_BUILD_OPS=0
-export DS_BUILD_AIO=0
python3.11 -m pip install pip --upgrade
python3.11 -m pip install -r requirements.txt
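
A small robustness note, not part of this commit: the requirements step assumes a `python3.11` binary is on PATH. A guard like the following (hypothetical, using only standard shell builtins) fails early with a clear message instead of a bare "command not found" error:

# Fail early if the expected interpreter is missing.
if ! command -v python3.11 >/dev/null 2>&1; then
    echo "python3.11 was not found on PATH; please install Python 3.11 first."
    exit 1
fi
python3.11 -m pip install --upgrade pip
python3.11 -m pip install -r requirements.txt
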
@@ -113,88 +111,19 @@ fi

echo ""
echo "Downloading latest model..."
-curl -o "models/gpt4all-lora-quantized-ggml.bin" "https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin"
+curl -o "models/gpt4all-lora-quantized-ggml.bin" "https://huggingface.co/ParisNeo/GPT4All/resolve/main/gpt4all-lora-quantized-ggml.bin"
if [ $? -ne 0 ]; then
    echo "Failed to download model. Please check your internet connection."
    read -p "Do you want to try downloading again? Press Y to download." yn
    case $yn in
        [Yy]* ) echo "Downloading latest model..."
-            curl -o "models/gpt4all-lora-quantized-ggml.bin" "https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin";;
+            curl -o "models/gpt4all-lora-quantized-ggml.bin" "https://huggingface.co/ParisNeo/GPT4All/resolve/main/gpt4all-lora-quantized-ggml.bin";;
        * ) echo "Skipping download of model file...";;
    esac
else
    echo "Model successfully downloaded."
fi

echo ""
echo "In order to make a model work, it needs to go through the LLaMA tokenizer, this will fix errors with the model in run.bat. Do you want to convert the model?"
read -p "Press Y to convert or N to skip: " yn
case $yn in
    [Yy]* )
        echo ""
        echo "Select a model to convert:"
        count=0
        for f in models/*; do
            count=$((count+1))
            file[$count]=$f
            echo "[$count] $f"
        done

        # Prompt user to choose a model to convert
        read -p "Enter the number of the model you want to convert: " modelNumber

        if [ -z "${file[modelNumber]}" ]; then
            echo ""
            echo "Invalid option. Restarting..."
            exit 1
        fi

        modelPath="${file[modelNumber]}"

        echo ""
        echo "You selected $modelPath"

        # Ask user if they want to convert the model
        echo ""
        read -p "Do you want to convert the selected model to the new format? (Y/N)" choice

        if [ "$choice" == "N" ]; then
            echo ""
            echo "Model conversion cancelled. Skipping..."
            exit 0
        fi

        echo "The code has been converted successfully."
esac
# Convert the model
echo ""
echo "Converting the model to the new format..."
if [ ! -d "tmp/llama.cpp" ]; then
    git clone https://github.com/ggerganov/llama.cpp.git tmp/llama.cpp
    cd tmp/llama.cpp
    git checkout 0f07cacb05f49704d35a39aa27cfd4b419eb6f8d
    cd ../..
fi
mv -f "${modelPath}" "${modelPath}.original"
python tmp/llama.cpp/migrate-ggml-2023-03-30-pr613.py "${modelPath}.original" "${modelPath}"
if [ $? -ne 0 ]; then
    echo ""
    echo "Error during model conversion. Restarting..."
    mv -f "${modelPath}.original" "${modelPath}"
    goto CONVERT_RESTART
else
    echo ""
    echo "The model file (${modelPath}) has been converted to the new format."
    goto END
fi

:CANCEL_CONVERSION
echo ""
echo "Conversion cancelled. Skipping..."
goto END

:END
echo ""
echo "Cleaning tmp folder"
rm -rf "./tmp"