diff --git a/app.py b/app.py
index 48d67278..362759fc 100644
--- a/app.py
+++ b/app.py
@@ -5,7 +5,7 @@
 import sqlite3
 import traceback
 from datetime import datetime
 from concurrent.futures import ThreadPoolExecutor
-
+import sys
 from flask import (
     Flask,
@@ -250,6 +250,7 @@ GPT4All:Welcome! I'm here to assist you with anything you need. What can I do fo
 
     def new_text_callback(self, text: str):
         print(text, end="")
+        sys.stdout.flush()
         self.full_text += text
         if self.is_bot_text_started:
             self.bot_says += text
diff --git a/install.bat b/install.bat
index e96e7759..0fb0136b 100644
--- a/install.bat
+++ b/install.bat
@@ -145,7 +145,7 @@ if not exist models/gpt4all-lora-quantized-ggml.bin (
 
 :DOWNLOAD_WITH_BROWSER
 start https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin
-echo Link has been opened with the default web browser, make sure to save it into the models folder. Press any key to continue.
+echo Link has been opened with the default web browser, make sure to save it into the models folder. When it finishes the download, press any key to continue.
 pause
 goto :CONTINUE
 
@@ -171,6 +171,13 @@ goto :CONTINUE
 
 :CONTINUE
 echo.
+echo Converting the model to the new format
+if not exist tmp\llama.cpp git clone https://github.com/ggerganov/llama.cpp.git tmp\llama.cpp
+move models\gpt4all-lora-quantized-ggml.bin models\gpt4all-lora-quantized-ggml.bin.original
+python tmp\llama.cpp\migrate-ggml-2023-03-30-pr613.py models\gpt4all-lora-quantized-ggml.bin.original models\gpt4all-lora-quantized-ggml.bin
+echo The model file (gpt4all-lora-quantized-ggml.bin) has been fixed.
+
+
 echo Cleaning tmp folder
 rd /s /q "./tmp"
diff --git a/run.bat b/run.bat
index 3d6ab34f..77d99f59 100644
--- a/run.bat
+++ b/run.bat
@@ -38,32 +38,8 @@
 echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
 
 REM Activate the virtual environment
 call env\Scripts\activate.bat
-:RESTART
 REM Run the Python app
 python app.py %*
 set app_result=%errorlevel%
-
-REM Ask if user wants the model fixed
-IF %app_result% EQU 0 (
-    goto END
-) ELSE (
-    echo.
-    choice /C YN /M "The model file (gpt4all-lora-quantized-ggml.bin) appears to be invalid. Do you want to fix it?"
-    if errorlevel 2 goto END
-    if errorlevel 1 goto MODEL_FIX
-)
-
-REM Git Clone, Renames the bad model and fixes it using the same original name
-:MODEL_FIX
-if not exist llama.cpp git clone https://github.com/ggerganov/llama.cpp.git
-move models\gpt4all-lora-quantized-ggml.bin models\gpt4all-lora-quantized-ggml.bin.original
-python llama.cpp\migrate-ggml-2023-03-30-pr613.py models\gpt4all-lora-quantized-ggml.bin.original models\gpt4all-lora-quantized-ggml.bin
-echo The model file (gpt4all-lora-quantized-ggml.bin) has been fixed. Press any key to restart...
-pause >nul
-goto RESTART
-
-:END
-REM Wait for user input before exiting
-echo.
-echo Press any key to exit...
 pause >nul