diff --git a/app.py b/app.py
index ad4f1338..48d67278 100644
--- a/app.py
+++ b/app.py
@@ -468,6 +468,6 @@ if __name__ == "__main__":
     )
     parser.add_argument(
-        "-m", "--model", type=str, default="gpt4all-lora-quantized.bin", help="Force using a specific model."
+        "-m", "--model", type=str, default="gpt4all-lora-quantized-ggml.bin", help="Force using a specific model."
     )
     parser.add_argument(
         "--temp", type=float, default=0.1, help="Temperature parameter for the model."
diff --git a/run.bat b/run.bat
index 78b98076..3d6ab34f 100644
--- a/run.bat
+++ b/run.bat
@@ -35,6 +35,35 @@
 echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
 echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
 echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
-echo on
-call env/Scripts/activate.bat
-python app.py %*
\ No newline at end of file
+REM Activate the virtual environment
+call env\Scripts\activate.bat
+
+:RESTART
+REM Run the Python app
+python app.py %*
+set app_result=%errorlevel%
+
+REM Ask if user wants the model fixed
+IF %app_result% EQU 0 (
+    goto END
+) ELSE (
+    echo.
+    choice /C YN /M "The model file (gpt4all-lora-quantized-ggml.bin) appears to be invalid. Do you want to fix it?"
+    if errorlevel 2 goto END
+    if errorlevel 1 goto MODEL_FIX
+)
+
+REM Git Clone, Renames the bad model and fixes it using the same original name
+:MODEL_FIX
+if not exist llama.cpp git clone https://github.com/ggerganov/llama.cpp.git
+move models\gpt4all-lora-quantized-ggml.bin models\gpt4all-lora-quantized-ggml.bin.original
+python llama.cpp\migrate-ggml-2023-03-30-pr613.py models\gpt4all-lora-quantized-ggml.bin.original models\gpt4all-lora-quantized-ggml.bin
+echo The model file (gpt4all-lora-quantized-ggml.bin) has been fixed. Press any key to restart...
+pause >nul
+goto RESTART
+
+:END
+REM Wait for user input before exiting
+echo.
+echo Press any key to exit...
+pause >nul