mirror of https://github.com/ParisNeo/lollms-webui.git
synced 2025-02-20 17:22:47 +00:00

upgraded the install scripts

This commit is contained in:
parent e8cc2dba78
commit 14c6a6803b
@@ -138,7 +138,6 @@ else
        * ) echo "Skipping download of model file...";;
    esac
fi

#!/bin/bash

echo ""
echo "Downloading latest model..."
@@ -169,13 +168,13 @@ case $yn in
            echo "[$count] $f"
        done

        # Prompt user to choose a model to convert
        read -p "Enter the number of the model you want to convert: " modelNumber

        if [ -z "${file[modelNumber]}" ]; then
            echo ""
            echo "Invalid option. Restarting..."
            exit 1
        fi

        modelPath="${file[modelNumber]}"
@@ -183,41 +182,52 @@ case $yn in
        echo ""
        echo "You selected $modelPath"

        # Ask user if they want to convert the model
        echo ""
        read -p "Do you want to convert the selected model to the new format? (Y/N)" choice

        if [ "$choice" == "N" ]; then
            echo ""
            echo "Model conversion cancelled. Skipping..."
            exit 0
        fi

        # Convert the model
        echo ""
        echo "Converting the model to the new format..."
        if [ ! -d "tmp/llama.cpp" ]; then
            git clone https://github.com/ggerganov/llama.cpp.git tmp/llama.cpp
            cd tmp/llama.cpp
            git checkout 0f07cacb05f49704d35a39aa27cfd4b419eb6f8d
            cd ../..
        fi
        mv -f "${modelPath}" "${modelPath}.original"
        python tmp/llama.cpp/migrate-ggml-2023-03-30-pr613.py "${modelPath}.original" "${modelPath}"
        if [ $? -ne 0 ]; then
            echo ""
            echo "Error during model conversion. Restarting..."
            mv -f "${modelPath}.original" "${modelPath}"
            exit 1
        else
            echo ""
            echo "The model file (${modelPath}) has been converted to the new format."
        fi
        ;;
    * ) echo ""
        echo "Conversion cancelled. Skipping..."
        ;;
esac

echo ""
echo "Cleaning tmp folder"
rm -rf "./tmp"

echo "Virtual environment created and packages installed successfully."
echo "Everything is set up. Just run run.sh"
exit 0
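The numbered menu that the scripts build by hand with `count` and `file[$count]` can also be written with bash's built-in `select`, which prints the numbered list and re-prompts on invalid input. A minimal sketch, assuming the same models/ directory layout; `modelPath` matches the variable used above, the other names are illustrative:

#!/bin/bash
# Sketch: model picker using bash's built-in `select` instead of a manual counter.
shopt -s nullglob                 # an empty models/ dir yields an empty array, not a literal "models/*"
models=(models/*)
if [ ${#models[@]} -eq 0 ]; then
    echo "No models found in models/."
    exit 1
fi
PS3="Enter the number of the model you want to convert: "
select modelPath in "${models[@]}"; do
    # select leaves $modelPath empty when the reply is not a valid menu number
    if [ -n "$modelPath" ]; then
        echo "You selected $modelPath"
        break
    fi
    echo "Invalid option. Try again."
done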
@@ -59,12 +59,95 @@ if [ $? -ne 0 ]; then
    exit 1
fi

echo ""
echo "Downloading latest model..."
curl -o "models/gpt4all-lora-quantized-ggml.bin" "https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin"
if [ $? -ne 0 ]; then
    echo "Failed to download model. Please check your internet connection."
    read -p "Do you want to try downloading again? Press Y to download." yn
    case $yn in
        [Yy]* ) echo "Downloading latest model..."
            curl -o "models/gpt4all-lora-quantized-ggml.bin" "https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin";;
        * ) echo "Skipping download of model file...";;
    esac
else
    echo "Model successfully downloaded."
fi

echo ""
echo "In order to make a model work, it needs to go through the LLaMA tokenizer; this will fix errors with the model in run.sh. Do you want to convert the model?"
read -p "Press Y to convert or N to skip: " yn
case $yn in
    [Yy]* )
        echo ""
        echo "Select a model to convert:"
        count=0
        for f in models/*; do
            count=$((count+1))
            file[$count]=$f
            echo "[$count] $f"
        done

        # Prompt user to choose a model to convert
        read -p "Enter the number of the model you want to convert: " modelNumber

        if [ -z "${file[modelNumber]}" ]; then
            echo ""
            echo "Invalid option. Restarting..."
            exit 1
        fi

        modelPath="${file[modelNumber]}"

        echo ""
        echo "You selected $modelPath"

        # Ask user if they want to convert the model
        echo ""
        read -p "Do you want to convert the selected model to the new format? (Y/N)" choice

        if [ "$choice" == "N" ]; then
            echo ""
            echo "Model conversion cancelled. Skipping..."
            exit 0
        fi

        # Convert the model
        echo ""
        echo "Converting the model to the new format..."
        if [ ! -d "tmp/llama.cpp" ]; then
            git clone https://github.com/ggerganov/llama.cpp.git tmp/llama.cpp
            cd tmp/llama.cpp
            git checkout 0f07cacb05f49704d35a39aa27cfd4b419eb6f8d
            cd ../..
        fi
        mv -f "${modelPath}" "${modelPath}.original"
        python tmp/llama.cpp/migrate-ggml-2023-03-30-pr613.py "${modelPath}.original" "${modelPath}"
        if [ $? -ne 0 ]; then
            echo ""
            echo "Error during model conversion. Restarting..."
            mv -f "${modelPath}.original" "${modelPath}"
            exit 1
        else
            echo ""
            echo "The model file (${modelPath}) has been converted to the new format."
        fi
        ;;
    * ) echo ""
        echo "Conversion cancelled. Skipping..."
        ;;
esac

echo ""
echo "Cleaning tmp folder"
rm -rf "./tmp"

echo "Virtual environment created and packages installed successfully."
echo "Everything is set up. Just run run.sh"
exit 0
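The retry prompt above makes at most one extra attempt and does not re-check the result. If unattended retries are acceptable, curl can do the retrying itself: --fail turns HTTP errors into a non-zero exit status and --retry repeats transient failures. A sketch under those assumptions, reusing the URL and destination from the script above:

#!/bin/bash
# Sketch: download with curl-managed retries instead of a one-shot Y/N prompt.
MODEL_URL="https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin"
MODEL_PATH="models/gpt4all-lora-quantized-ggml.bin"
mkdir -p models
if curl --fail --location --retry 3 -o "$MODEL_PATH" "$MODEL_URL"; then
    echo "Model successfully downloaded."
else
    echo "Failed to download model. Please check your internet connection and try again."
    exit 1
fi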
@@ -287,11 +287,13 @@ if errorlevel 2 (
    echo Model conversion cancelled. Skipping...
    goto END
)

REM Convert the model
echo.
echo Converting the model to the new format...
if not exist tmp\llama.cpp git clone https://github.com/ggerganov/llama.cpp.git tmp\llama.cpp
cd tmp\llama.cpp
git checkout 0f07cacb05f49704d35a39aa27cfd4b419eb6f8d
cd ..\..
move /y "!modelPath!" "!modelPath!.original"
python tmp\llama.cpp\migrate-ggml-2023-03-30-pr613.py "!modelPath!.original" "!modelPath!"
if %errorlevel% neq 0 (
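The batch branch above renames the model, runs the migration script, inspects %errorlevel%, and restores the original file on failure. In the shell installers the same rename, convert, roll back pattern repeats several times and could be factored into a function. A sketch only; convert_model is a hypothetical helper, not part of these scripts:

#!/bin/bash
# Sketch: conversion with rollback as a reusable function (hypothetical helper).
convert_model () {
    local modelPath="$1"
    mv -f "${modelPath}" "${modelPath}.original"
    if python tmp/llama.cpp/migrate-ggml-2023-03-30-pr613.py "${modelPath}.original" "${modelPath}"; then
        echo "The model file (${modelPath}) has been converted to the new format."
    else
        echo "Error during model conversion. Restoring the original file..."
        mv -f "${modelPath}.original" "${modelPath}"   # roll back to the untouched copy
        return 1
    fi
}

convert_model "models/gpt4all-lora-quantized-ggml.bin" || exit 1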
@@ -172,6 +172,9 @@ echo ""
echo "Converting the model to the new format..."
if [ ! -d "tmp/llama.cpp" ]; then
    git clone https://github.com/ggerganov/llama.cpp.git tmp/llama.cpp
    cd tmp/llama.cpp
    git checkout 0f07cacb05f49704d35a39aa27cfd4b419eb6f8d
    cd ../..
fi
mv -f "${modelPath}" "${modelPath}.original"
python tmp/llama.cpp/migrate-ggml-2023-03-30-pr613.py "${modelPath}.original" "${modelPath}"
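The cd into the clone and back again exists only to run the pinned checkout, and a failed cd would leave the script stranded in tmp/. git's -C option runs a command against a repository from outside it, which removes the directory juggling. A sketch of the equivalent step:

#!/bin/bash
# Sketch: pin llama.cpp to the known-good commit without changing directories.
if [ ! -d "tmp/llama.cpp" ]; then
    git clone https://github.com/ggerganov/llama.cpp.git tmp/llama.cpp
    git -C tmp/llama.cpp checkout 0f07cacb05f49704d35a39aa27cfd4b419eb6f8d
fi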