Repaired Windows issues

This commit is contained in:
ParisNeo 2023-04-08 12:25:40 +02:00
parent 0c6a8dd1e7
commit 88c7f68a93
3 changed files with 10 additions and 26 deletions

3
app.py
View File

@@ -5,7 +5,7 @@ import sqlite3
import traceback
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
import sys
from flask import (
Flask,
@@ -250,6 +250,7 @@ GPT4All:Welcome! I'm here to assist you with anything you need. What can I do fo
def new_text_callback(self, text: str):
print(text, end="")
sys.stdout.flush()
self.full_text += text
if self.is_bot_text_started:
self.bot_says += text

View File

@@ -145,7 +145,7 @@ if not exist models/gpt4all-lora-quantized-ggml.bin (
:DOWNLOAD_WITH_BROWSER
start https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin
echo Link has been opened with the default web browser, make sure to save it into the models folder. Press any key to continue.
echo Link has been opened with the default web browser, make sure to save it into the models folder. When it finishes the download, press any key to continue.
pause
goto :CONTINUE
@@ -171,6 +171,13 @@ goto :CONTINUE
:CONTINUE
echo.
echo Converting the model to the new format
if not exist tmp/llama.cpp git clone https://github.com/ggerganov/llama.cpp.git tmp\llama.cpp
move models\gpt4all-lora-quantized-ggml.bin models\gpt4all-lora-quantized-ggml.bin.original
python tmp\llama.cpp\migrate-ggml-2023-03-30-pr613.py models\gpt4all-lora-quantized-ggml.bin.original models\gpt4all-lora-quantized-ggml.bin
echo The model file (gpt4all-lora-quantized-ggml.bin) has been fixed.
echo Cleaning tmp folder
rd /s /q "./tmp"

24
run.bat
View File

@ -38,32 +38,8 @@ echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
REM Activate the virtual environment
call env\Scripts\activate.bat
:RESTART
REM Run the Python app
python app.py %*
set app_result=%errorlevel%
REM Ask if user wants the model fixed
IF %app_result% EQU 0 (
goto END
) ELSE (
echo.
choice /C YN /M "The model file (gpt4all-lora-quantized-ggml.bin) appears to be invalid. Do you want to fix it?"
if errorlevel 2 goto END
if errorlevel 1 goto MODEL_FIX
)
REM Git Clone, Renames the bad model and fixes it using the same original name
:MODEL_FIX
if not exist llama.cpp git clone https://github.com/ggerganov/llama.cpp.git
move models\gpt4all-lora-quantized-ggml.bin models\gpt4all-lora-quantized-ggml.bin.original
python llama.cpp\migrate-ggml-2023-03-30-pr613.py models\gpt4all-lora-quantized-ggml.bin.original models\gpt4all-lora-quantized-ggml.bin
echo The model file (gpt4all-lora-quantized-ggml.bin) has been fixed. Press any key to restart...
pause >nul
goto RESTART
:END
REM Wait for user input before exiting
echo.
echo Press any key to exit...
pause >nul