Mirror of https://github.com/ParisNeo/lollms-webui.git (synced 2024-12-18 20:17:50 +00:00)

Merge branch 'main' of https://github.com/ParisNeo/gpt4all-ui

This commit is contained in commit c1e6fc8a8e
21 .github/ISSUE_TEMPLATE.md vendored Normal file
@@ -0,0 +1,21 @@

## Expected Behavior

Please describe the behavior you are expecting.

## Current Behavior

Please describe the behavior you are currently experiencing.

## Steps to Reproduce

Please provide detailed steps to reproduce the issue.

1. Step 1
2. Step 2
3. Step 3

## Possible Solution

If you have any suggestions on how to fix the issue, please describe them here.

## Context

Please provide any additional context about the issue.

## Screenshots

If applicable, add screenshots to help explain the issue.
25 .github/PULL_REQUEST_TEMPLATE.md vendored Normal file
@@ -0,0 +1,25 @@

## Description

Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change.

Fixes # (issue)

## Type of change

Please delete options that are not relevant.

- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to change)

## Checklist:

Please put an `x` in the boxes that apply. You can also fill these out after creating the PR.

- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] My changes generate no new warnings
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] I have tested this code locally, and it is working as intended
- [ ] I have updated the documentation accordingly

## Screenshots

If applicable, add screenshots to help explain your changes.
7 .github/dependabot.yml vendored Normal file
@@ -0,0 +1,7 @@

---
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
31 .github/workflows/docker.yaml vendored Normal file
@@ -0,0 +1,31 @@

name: Docker Build and Lint

on:
  push:
    branches:
      - main
  pull_request:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3

      - name: Build Docker Image
        uses: docker/build-push-action@v2
        with:
          context: .
          push: false
          tags: gpt4all-ui:latest

  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3

      - name: Run Hadolint
        run: |
          docker run --rm -i -v $PWD/.hadolint.yaml:/.config/hadolint.yaml hadolint/hadolint < Dockerfile
15 .gitignore vendored
@@ -130,13 +130,14 @@ dmypy.json

# Database
*.db

# Docker files
/data

# Models and tokenizers
*.bin
*.bin.orig
*.model
# models
models/
!models/.keep
!models/README.md


# Vscode files
.vscode
# Temporary files
tmp/
2 .hadolint.yaml Normal file
@@ -0,0 +1,2 @@

ignored:
  - SC1091
3 .vscode/settings.json vendored Normal file
@@ -0,0 +1,3 @@

{
    "ros.distro": "noetic"
}
Dockerfile
@@ -3,12 +3,12 @@ FROM python:3.10

WORKDIR /srv
COPY ./requirements.txt .

RUN python3.10 -m venv env
RUN . env/bin/activate
RUN python3.10 -m pip install -r requirements.txt --upgrade pip
RUN python3 -m venv venv && . venv/bin/activate
RUN python3 -m pip install --no-cache-dir -r requirements.txt --upgrade pip

COPY ./app.py /srv/app.py
COPY ./static /srv/static
COPY ./templates /srv/templates

CMD ["python", "app.py", "--host", "0.0.0.0", "--port", "4685", "--db_path", "data/database.db"]
# COPY ./models /srv/models # Mounting model is more efficient
CMD ["python", "app.py", "--host", "0.0.0.0", "--port", "9600", "--db_path", "data/database.db"]
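For reference, a minimal sketch of running the built image with the model mounted from the host rather than baked into the image, as the commented-out `COPY ./models` line above suggests. The image tag comes from the CI workflow; the host-side paths are assumptions:

```bash
# Hypothetical invocation: mount models and data from the host,
# publish the port the new CMD listens on.
docker run --rm -p 9600:9600 \
    -v "$PWD/models:/srv/models" \
    -v "$PWD/data:/srv/data" \
    gpt4all-ui:latest
```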
120 README.md
@@ -1,43 +1,28 @@

# Gpt4All Web UI

![GitHub license](https://img.shields.io/github/license/ParisNeo/Gpt4All-webui)

![GitHub issues](https://img.shields.io/github/issues/ParisNeo/Gpt4All-webui)

![GitHub stars](https://img.shields.io/github/stars/ParisNeo/Gpt4All-webui)

![GitHub forks](https://img.shields.io/github/forks/ParisNeo/Gpt4All-webui)
![GitHub license](https://img.shields.io/github/license/nomic-ai/GPT4All-ui)
![GitHub issues](https://img.shields.io/github/issues/nomic-ai/GPT4All-ui)
![GitHub stars](https://img.shields.io/github/stars/nomic-ai/GPT4All-ui)
![GitHub forks](https://img.shields.io/github/forks/nomic-ai/GPT4All-ui)
[![Discord](https://img.shields.io/discord/1092918764925882418?color=7289da&label=Discord&logo=discord&logoColor=ffffff)](https://discord.gg/DZ4wsgg4)

This is a Flask web application that provides a chat UI for interacting with the GPT4All chatbot.

[Discord server](https://discord.gg/DZ4wsgg4)
Follow us on our [Discord server](https://discord.gg/DZ4wsgg4).

## What is GPT4All
## What is GPT4All?

GPT4All is a language model built by Nomic-AI, a company specializing in natural language processing. The app uses Nomic-AI's library to communicate with the GPT4All model, which runs locally on the user's PC. For more details about this project, head over to their [github repository](https://github.com/nomic-ai/gpt4all). You can also read their [Technical report](https://s3.amazonaws.com/static.nomic.ai/gpt4all/2023_GPT4All_Technical_Report.pdf) for more information about the training process, the database, etc.
GPT4All is an exceptional language model, designed and developed by Nomic-AI, a proficient company dedicated to natural language processing. The app uses Nomic-AI's advanced library to communicate with the cutting-edge GPT4All model, which operates locally on the user's PC, ensuring seamless and efficient communication.

The app allows users to send messages to the chatbot and view its responses in real-time. Additionally, users can export the entire chat history in text or JSON format.
If you are interested in learning more about this groundbreaking project, visit their [github repository](https://github.com/nomic-ai/gpt4all), where you can find comprehensive information regarding the app's functionalities and technical details. Moreover, you can delve deeper into the training process and database by going through their detailed [Technical report](https://s3.amazonaws.com/static.nomic.ai/gpt4all/2023_GPT4All_Technical_Report.pdf), available for download.

The model has just been released, and it may evolve over time; this webui is meant for the community to get easy and fully local access to a chatbot that may become better with time.
One of the app's impressive features is that it allows users to send messages to the chatbot and receive instantaneous responses in real-time, ensuring a seamless user experience. Additionally, the app facilitates the exportation of the entire chat history in either text or JSON format, providing greater flexibility to the users.

## Disclaimer

The model used by GPT4ALL has been fine-tuned using the LORA technique on LLAMA 7B weights (for now). It is important to note that the LLAMA weights are under a commercial proprietary license, and therefore, this model cannot be used for commercial purposes. We do not provide the weights ourselves, but have built a UI wrapper on top of the Nomic library, which downloads the weights automatically upon running the program.

It is important to understand that we are not responsible for any misuse of this tool. Please use it responsibly and at your own risk. While we hope that Nomic will address this issue in the future by providing clean weights that can be used freely, for now, this model is intended for testing purposes only.
It's worth noting that the model has recently been launched, and it's expected to evolve over time, enabling it to become even better in the future. This webui is designed to provide the community with easy and fully localized access to a chatbot that will continue to improve and adapt over time.

## UI screenshot
![image](https://user-images.githubusercontent.com/827993/229951093-27114d9f-0e1f-4d84-b103-e35cd3f9310d.png)

**Note for Windows users:** At the moment, Nomic-AI has not provided a wheel for Windows, so you will need to use the app with the Windows Subsystem for Linux (WSL). To install WSL, follow these steps:

- Open the Windows Features settings (you can find this by searching for "Windows Features" in the Start menu).
- Enable the "Windows Subsystem for Linux" feature.
- Restart your computer when prompted.
- Install a Linux distribution from the Microsoft Store (e.g., Ubuntu).
- Open the Linux distribution and follow the prompts to create a new user account.
- We apologize for any inconvenience this may cause. We are working on a more widespread version.

## Installation

To install the app, follow these steps:
@@ -45,35 +30,59 @@ To install the app, follow these steps:
1. Clone the GitHub repository:

```
git clone https://github.com/ParisNeo/Gpt4All-webui
git clone https://github.com/nomic-ai/gpt4all-ui
```

### Manual setup
Hint: Scroll down for the docker-compose setup.

1. Navigate to the project directory:

```
cd Gpt4All-webui
```bash
cd gpt4all-ui
```

1. Run the appropriate installation script for your platform:
2. Run the appropriate installation script for your platform:

On Windows with WSL:

- When Nomic adds Windows support, you will be able to use this:

```
install.bat
```
On Windows:
```cmd
install.bat
```
- On Linux/macOS:

```
./install.sh
```
```bash
bash ./install.sh
```

On Linux/macOS, if you have issues, more details are presented [here](docs/Linux_Osx_Install.md)
These scripts will create a Python virtual environment and install the required dependencies. They will also download the models and install them.

These scripts will create a Python virtual environment and install the required dependencies.
Now you're ready to work!

## Usage
For simple newbies on Windows:
```cmd
run.bat
```

For simple newbies on Linux/macOS:
```bash
bash run.sh
```

If you want more control over your launch, you can activate your environment:

On Windows:
```cmd
env/Scripts/activate.bat
```

On Linux/macOS:
```bash
source venv/bin/activate
```

Now you are ready to customize your Bot.

To run the Flask server, execute the following command:
```bash
@@ -101,6 +110,22 @@ Once the server is running, open your web browser and navigate to http://localhost:9600

Make sure to adjust the default values and descriptions of the options to match your specific application.
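As a quick sanity check once the server is up, here is a minimal sketch of calling the chat endpoint from the command line. The `/bot` route and its JSON `message` field come from `app.py` below; the host and port are the defaults:

```bash
# Streamed reply: the first chunk is a JSON header with the message ids,
# followed by the bot's text as it is generated.
curl -N -X POST http://localhost:9600/bot \
    -H "Content-Type: application/json" \
    -d '{"message": "Hello"}'
```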
### Docker Compose Setup
Make sure to have the `gpt4all-lora-quantized-ggml.bin` inside the `models` directory.
After that you can simply use docker-compose or podman-compose to build and start the application:

Build
```bash
docker-compose -f docker-compose.yml build
```

Start
```bash
docker-compose -f docker-compose.yml up
```

After that you can open the application in your browser at http://localhost:9600
## Contribute

This is an open-source project by the community for the community. Our chatbot is a UI wrapper for Nomic AI's model, which enables natural language processing and machine learning capabilities.
@@ -144,15 +169,4 @@ We are excited about these future plans for the project and look forward to implementing them

## License

This project is licensed under the Apache 2.0 License. See the [LICENSE](https://github.com/ParisNeo/Gpt4All-webui/blob/main/LICENSE) file for details.

## Special thanks

Special thanks to:
- [cclaar-byte](https://github.com/cclaar-byte)
- [CybearWarfare](https://github.com/CybearWarfare)
- [Jan Brummelte](https://github.com/brummelte)
- [higorvaz](https://github.com/higorvaz)

for their contributions.
This project is licensed under the Apache 2.0 License. See the [LICENSE](https://github.com/nomic-ai/GPT4All-ui/blob/main/LICENSE) file for details.
474 app.py
@@ -1,129 +1,150 @@

from flask import Flask, jsonify, request, render_template, Response, stream_with_context
from nomic.gpt4all import GPT4All
import argparse
import threading
from io import StringIO
import sys
import json
import re
import sqlite3
import traceback
from datetime import datetime

import sqlite3
import json
import time
import traceback
from flask import (
    Flask,
    Response,
    jsonify,
    render_template,
    request,
    stream_with_context,
)
from pyllamacpp.model import Model

import select

#=================================== Database ==================================================================
# =================================== Database ==================================================================
class Discussion:
    def __init__(self, discussion_id, db_path='database.db'):
    def __init__(self, discussion_id, db_path="database.db"):
        self.discussion_id = discussion_id
        self.db_path = db_path

    @staticmethod
    def create_discussion(db_path='database.db', title='untitled'):
    def create_discussion(db_path="database.db", title="untitled"):
        with sqlite3.connect(db_path) as conn:
            cur = conn.cursor()
            cur.execute("INSERT INTO discussion (title) VALUES (?)", (title,))
            discussion_id = cur.lastrowid
            conn.commit()
        return Discussion(discussion_id, db_path)


    @staticmethod
    def get_discussion(db_path='database.db', id=0):
        return Discussion(id, db_path)
    def get_discussion(db_path="database.db", discussion_id=0):
        return Discussion(discussion_id, db_path)

    def add_message(self, sender, content):
        with sqlite3.connect(self.db_path) as conn:
            cur = conn.cursor()
            cur.execute('INSERT INTO message (sender, content, discussion_id) VALUES (?, ?, ?)',
                        (sender, content, self.discussion_id))
            cur.execute(
                "INSERT INTO message (sender, content, discussion_id) VALUES (?, ?, ?)",
                (sender, content, self.discussion_id),
            )
            message_id = cur.lastrowid
            conn.commit()
        return message_id

    @staticmethod
    def get_discussions(db_path):
        with sqlite3.connect(db_path) as conn:
            cursor = conn.cursor()
            cursor.execute('SELECT * FROM discussion')
            cursor.execute("SELECT * FROM discussion")
            rows = cursor.fetchall()
        return [{'id': row[0], 'title': row[1]} for row in rows]
        return [{"id": row[0], "title": row[1]} for row in rows]

    @staticmethod
    def rename(db_path, discussion_id, title):
        with sqlite3.connect(db_path) as conn:
            cursor = conn.cursor()
            cursor.execute('UPDATE discussion SET title=? WHERE id=?', (title, discussion_id))
            cursor.execute(
                "UPDATE discussion SET title=? WHERE id=?", (title, discussion_id)
            )
            conn.commit()

    def delete_discussion(self):
        with sqlite3.connect(self.db_path) as conn:
            cur = conn.cursor()
            cur.execute('DELETE FROM message WHERE discussion_id=?', (self.discussion_id,))
            cur.execute('DELETE FROM discussion WHERE id=?', (self.discussion_id,))
            cur.execute(
                "DELETE FROM message WHERE discussion_id=?", (self.discussion_id,)
            )
            cur.execute("DELETE FROM discussion WHERE id=?", (self.discussion_id,))
            conn.commit()

    def get_messages(self):
        with sqlite3.connect(self.db_path) as conn:
            cur = conn.cursor()
            cur.execute('SELECT * FROM message WHERE discussion_id=?', (self.discussion_id,))
            cur.execute(
                "SELECT * FROM message WHERE discussion_id=?", (self.discussion_id,)
            )
            rows = cur.fetchall()
        return [{'sender': row[1], 'content': row[2], 'id':row[0]} for row in rows]


        return [{"sender": row[1], "content": row[2], "id": row[0]} for row in rows]

    def update_message(self, message_id, new_content):
        with sqlite3.connect(self.db_path) as conn:
            cur = conn.cursor()
            cur.execute('UPDATE message SET content = ? WHERE id = ?', (new_content, message_id))
            cur.execute(
                "UPDATE message SET content = ? WHERE id = ?", (new_content, message_id)
            )
            conn.commit()

    def remove_discussion(self):
        with sqlite3.connect(self.db_path) as conn:
            conn.cursor().execute('DELETE FROM discussion WHERE id=?', (self.discussion_id,))
            conn.cursor().execute(
                "DELETE FROM discussion WHERE id=?", (self.discussion_id,)
            )
            conn.commit()

def last_discussion_has_messages(db_path='database.db'):

def last_discussion_has_messages(db_path="database.db"):
    with sqlite3.connect(db_path) as conn:
        c = conn.cursor()
        c.execute("SELECT * FROM message ORDER BY id DESC LIMIT 1")
        last_message = c.fetchone()
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM message ORDER BY id DESC LIMIT 1")
        last_message = cursor.fetchone()
    return last_message is not None

def export_to_json(db_path='database.db'):

def export_to_json(db_path="database.db"):
    with sqlite3.connect(db_path) as conn:
        cur = conn.cursor()
        cur.execute('SELECT * FROM discussion')
        cur.execute("SELECT * FROM discussion")
        discussions = []
        for row in cur.fetchall():
            discussion_id = row[0]
            discussion = {'id': discussion_id, 'messages': []}
            cur.execute('SELECT * FROM message WHERE discussion_id=?', (discussion_id,))
            discussion = {"id": discussion_id, "messages": []}
            cur.execute("SELECT * FROM message WHERE discussion_id=?", (discussion_id,))
            for message_row in cur.fetchall():
                discussion['messages'].append({'sender': message_row[1], 'content': message_row[2]})
                discussion["messages"].append(
                    {"sender": message_row[1], "content": message_row[2]}
                )
            discussions.append(discussion)
        return discussions

def remove_discussions(db_path='database.db'):

def remove_discussions(db_path="database.db"):
    with sqlite3.connect(db_path) as conn:
        cur = conn.cursor()
        cur.execute('DELETE FROM message')
        cur.execute('DELETE FROM discussion')
        cur.execute("DELETE FROM message")
        cur.execute("DELETE FROM discussion")
        conn.commit()


# create database schema
def check_discussion_db(db_path):
    print("Checking discussions database...")
    with sqlite3.connect(db_path) as conn:
        cur = conn.cursor()
        cur.execute('''
        cur.execute(
            """
            CREATE TABLE IF NOT EXISTS discussion (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                title TEXT
            )
        ''')
        cur.execute('''
            """
        )
        cur.execute(
            """
            CREATE TABLE IF NOT EXISTS message (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                sender TEXT NOT NULL,
@@ -131,45 +152,106 @@ def check_discussion_db(db_path):
                discussion_id INTEGER NOT NULL,
                FOREIGN KEY (discussion_id) REFERENCES discussion(id)
            )
        ''')
            """
        )
        conn.commit()

    print("Ok")


# ========================================================================================================================


app = Flask("GPT4All-WebUI", static_url_path="/static", static_folder="static")

app = Flask("GPT4All-WebUI", static_url_path='/static', static_folder='static')
class Gpt4AllWebUI():
    def __init__(self, chatbot_bindings, app, db_path='database.db') -> None:

class Gpt4AllWebUI:
    def __init__(self, chatbot_bindings, _app, db_path="database.db") -> None:
        self.current_discussion = None
        self.chatbot_bindings = chatbot_bindings
        self.app=app
        self.db_path= db_path
        self.add_endpoint('/', '', self.index, methods=['GET'])
        self.add_endpoint('/stream', 'stream', self.stream, methods=['GET'])
        self.add_endpoint('/export', 'export', self.export, methods=['GET'])
        self.add_endpoint('/new_discussion', 'new_discussion', self.new_discussion, methods=['GET'])
        self.add_endpoint('/bot', 'bot', self.bot, methods=['POST'])
        self.add_endpoint('/discussions', 'discussions', self.discussions, methods=['GET'])
        self.add_endpoint('/rename', 'rename', self.rename, methods=['POST'])
        self.add_endpoint('/get_messages', 'get_messages', self.get_messages, methods=['POST'])
        self.add_endpoint('/delete_discussion', 'delete_discussion', self.delete_discussion, methods=['POST'])
        self.app = _app
        self.db_path = db_path
        self.add_endpoint("/", "", self.index, methods=["GET"])
        self.add_endpoint("/export", "export", self.export, methods=["GET"])
        self.add_endpoint(
            "/new_discussion", "new_discussion", self.new_discussion, methods=["GET"]
        )
        self.add_endpoint("/bot", "bot", self.bot, methods=["POST"])
        self.add_endpoint(
            "/discussions", "discussions", self.discussions, methods=["GET"]
        )
        self.add_endpoint("/rename", "rename", self.rename, methods=["POST"])
        self.add_endpoint(
            "/get_messages", "get_messages", self.get_messages, methods=["POST"]
        )
        self.add_endpoint(
            "/delete_discussion",
            "delete_discussion",
            self.delete_discussion,
            methods=["POST"],
        )

        self.add_endpoint('/update_message', 'update_message', self.update_message, methods=['GET'])



        self.add_endpoint(
            "/update_message", "update_message", self.update_message, methods=["GET"]
        )

        conditionning_message = """
Instruction: Act as GPT4All. A kind and helpful AI bot built to help users solve problems.
Start by welcoming the user then stop sending text.
GPT4All:"""
        self.prepare_query(conditionning_message)
        chatbot_bindings.generate(
            conditionning_message,
            n_predict=55,
            new_text_callback=self.new_text_callback,
            n_threads=8,
        )
        print(f"Bot said:{self.bot_says}")
        # Chatbot conditionning
        # response = self.chatbot_bindings.prompt("This is a discussion between A user and an AI. AI responds to user questions in a helpful manner. AI is not allowed to lie or deceive. AI welcomes the user\n### Response:")
        # print(response)

    def add_endpoint(self, endpoint=None, endpoint_name=None, handler=None, methods=['GET'], *args, **kwargs):
        self.app.add_url_rule(endpoint, endpoint_name, handler, methods=methods, *args, **kwargs)
    def prepare_query(self, message):
        self.bot_says = ""
        self.full_text = ""
        self.is_bot_text_started = False
        self.current_message = message

    def new_text_callback(self, text: str):
        print(text, end="")
        self.full_text += text
        if self.is_bot_text_started:
            self.bot_says += text
        if self.current_message in self.full_text:
            self.is_bot_text_started = True

    def new_text_callback_with_yield(self, text: str):
        """
        To do: fix the problem with yield to be able to show an interactive response as the text comes
        """
        print(text, end="")
        self.full_text += text
        if self.is_bot_text_started:
            self.bot_says += text
        if self.current_message in self.full_text:
            self.is_bot_text_started = True
        yield text

    def add_endpoint(
        self,
        endpoint=None,
        endpoint_name=None,
        handler=None,
        methods=["GET"],
        *args,
        **kwargs,
    ):
        self.app.add_url_rule(
            endpoint, endpoint_name, handler, methods=methods, *args, **kwargs
        )

    def index(self):
        return render_template('chat.html')
        return render_template("chat.html")

    def format_message(self, message):
        # Look for a code block within the message
@@ -184,170 +266,210 @@ class Gpt4AllWebUI():
        # Return the formatted message
        return message


    def stream(self):
        def generate():
            # Replace this with your text-generating code
            for i in range(10):
                yield f'This is line {i+1}\n'
                time.sleep(1)

        return Response(stream_with_context(generate()))

    def export(self):
        return jsonify(export_to_json(self.db_path))


    @stream_with_context
    def parse_to_prompt_stream(self, message, message_id):
        bot_says = ['']
        point = b''
        bot = self.chatbot_bindings.bot
        self.stop=False

        # very important. This is the maximum time we wait for the model
        wait_val = 15.0 # At the beginning the server may need time to send data. we wait 15s
        bot_says = ""
        self.stop = False

        # send the message to the bot
        print(f"Received message : {message}")
        bot = self.chatbot_bindings.bot
        bot.stdin.write(message.encode('utf-8'))
        bot.stdin.write(b"\n")
        bot.stdin.flush()

        # First we need to send the new message ID to the client
        response_id = self.current_discussion.add_message("GPT4All",'') # first the content is empty, but we'll fill it at the end
        yield(json.dumps({'type':'input_message_infos','message':message, 'id':message_id, 'response_id':response_id}))
        response_id = self.current_discussion.add_message(
            "GPT4All", ""
        )  # first the content is empty, but we'll fill it at the end
        yield (
            json.dumps(
                {
                    "type": "input_message_infos",
                    "message": message,
                    "id": message_id,
                    "response_id": response_id,
                }
            )
        )

        # Now let's wait for the bot to answer
        while not self.stop:
            readable, _, _ = select.select([bot.stdout], [], [], wait_val)
            wait_val = 4.0 # Once started, the process doesn't take that much so we reduce the wait
            if bot.stdout in readable:
                point += bot.stdout.read(1)
                try:
                    character = point.decode("utf-8")
                    if character == "\n":
                        bot_says.append('\n')
                        yield '\n'
                    else:
                        bot_says[-1] += character
                        yield character
                    point = b''
        self.current_message = "User: " + message + "\nGPT4All:"
        self.prepare_query(self.current_message)
        chatbot_model_bindings.generate(
            self.current_message,
            n_predict=55,
            new_text_callback=self.new_text_callback,
            n_threads=8,
        )

        self.current_discussion.update_message(response_id, self.bot_says)
        yield self.bot_says
        # TODO : change this to use the yield version in order to send text word by word

        return "\n".join(bot_says)

                except UnicodeDecodeError:
                    if len(point) > 4:
                        point = b''
            else:
                self.current_discussion.update_message(response_id,bot_says)
                return "\n".join(bot_says)

    def bot(self):
        self.stop=True
        with sqlite3.connect(self.db_path) as conn:
            try:
                if self.current_discussion is None or not last_discussion_has_messages(self.db_path):
                    self.current_discussion=Discussion.create_discussion(self.db_path)
        self.stop = True

                message_id = self.current_discussion.add_message("user", request.json['message'])
                message = f"{request.json['message']}"
        try:
            if self.current_discussion is None or not last_discussion_has_messages(
                self.db_path
            ):
                self.current_discussion = Discussion.create_discussion(self.db_path)

            message_id = self.current_discussion.add_message(
                "user", request.json["message"]
            )
            message = f"{request.json['message']}"

            # Segmented (the user receives the output as it comes)
            # We will first send a json entry that contains the message id and so on, then the text as it goes
            return Response(
                stream_with_context(
                    self.parse_to_prompt_stream(message, message_id)
                )
            )
        except Exception as ex:
            print(ex)
            return (
                "<b style='color:red;'>Exception :<b>"
                + str(ex)
                + "<br>"
                + traceback.format_exc()
                + "<br>Please report exception"
            )

                # Segmented (the user receives the output as it comes)
                # We will first send a json entry that contains the message id and so on, then the text as it goes
                return Response(stream_with_context(self.parse_to_prompt_stream(message, message_id)))
            except Exception as ex:
                print(ex)
                msg = traceback.print_exc()
                return "<b style='color:red;'>Exception :<b>"+str(ex)+"<br>"+traceback.format_exc()+"<br>Please report exception"

    def discussions(self):
        try:
            discussions = Discussion.get_discussions(self.db_path)
            discussions = Discussion.get_discussions(self.db_path)
            return jsonify(discussions)
        except Exception as ex:
            print(ex)
            msg = traceback.print_exc()
            return "<b style='color:red;'>Exception :<b>"+str(ex)+"<br>"+traceback.format_exc()+"<br>Please report exception"
            return (
                "<b style='color:red;'>Exception :<b>"
                + str(ex)
                + "<br>"
                + traceback.format_exc()
                + "<br>Please report exception"
            )

    def rename(self):
        data = request.get_json()
        id = data['id']
        title = data['title']
        Discussion.rename(self.db_path, id, title)
        discussion_id = data["id"]
        title = data["title"]
        Discussion.rename(self.db_path, discussion_id, title)
        return "renamed successfully"

    def get_messages(self):
        data = request.get_json()
        id = data['id']
        self.current_discussion = Discussion(id,self.db_path)
        discussion_id = data["id"]
        self.current_discussion = Discussion(discussion_id, self.db_path)
        messages = self.current_discussion.get_messages()
        return jsonify(messages)


    def delete_discussion(self):
        data = request.get_json()
        id = data['id']
        self.current_discussion = Discussion(id, self.db_path)
        discussion_id = data["id"]
        self.current_discussion = Discussion(discussion_id, self.db_path)
        self.current_discussion.delete_discussion()
        self.current_discussion = None
        return jsonify({})


    def update_message(self):
        try:
            id = request.args.get('id')
            new_message = request.args.get('message')
            self.current_discussion.update_message(id, new_message)
            return jsonify({"status":'ok'})
            discussion_id = request.args.get("id")
            new_message = request.args.get("message")
            self.current_discussion.update_message(discussion_id, new_message)
            return jsonify({"status": "ok"})
        except Exception as ex:
            print(ex)
            msg = traceback.print_exc()
            return "<b style='color:red;'>Exception :<b>"+str(ex)+"<br>"+traceback.format_exc()+"<br>Please report exception"
            return (
                "<b style='color:red;'>Exception :<b>"
                + str(ex)
                + "<br>"
                + traceback.format_exc()
                + "<br>Please report exception"
            )

    def new_discussion(self):
        title = request.args.get('title')
        self.current_discussion= Discussion.create_discussion(self.db_path, title)
        title = request.args.get("title")
        self.current_discussion = Discussion.create_discussion(self.db_path, title)
        # Get the current timestamp
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        # add a new discussion
        self.chatbot_bindings.close()
        self.chatbot_bindings.open()
        # self.chatbot_bindings.close()
        # self.chatbot_bindings.open()

        # Return a success response
        return json.dumps({'id': self.current_discussion.discussion_id})
        return json.dumps({"id": self.current_discussion.discussion_id, "time": timestamp})


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Start the chatbot Flask app.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Start the chatbot Flask app.")

    parser.add_argument('--temp', type=float, default=0.1, help='Temperature parameter for the model.')
    parser.add_argument('--n_predict', type=int, default=128, help='Number of tokens to predict at each step.')
    parser.add_argument('--top_k', type=int, default=40, help='Value for the top-k sampling.')
    parser.add_argument('--top_p', type=float, default=0.95, help='Value for the top-p sampling.')
    parser.add_argument('--repeat_penalty', type=float, default=1.3, help='Penalty for repeated tokens.')
    parser.add_argument('--repeat_last_n', type=int, default=64, help='Number of previous tokens to consider for the repeat penalty.')
    parser.add_argument('--ctx_size', type=int, default=2048, help='Size of the context window for the model.')
    parser.add_argument('--debug', dest='debug', action='store_true', help='launch Flask server in debug mode')
    parser.add_argument('--host', type=str, default='localhost', help='the hostname to listen on')
    parser.add_argument('--port', type=int, default=9600, help='the port to listen on')
    parser.add_argument('--db_path', type=str, default='database.db', help='Database path')
    parser.add_argument(
        "--temp", type=float, default=0.1, help="Temperature parameter for the model."
    )
    parser.add_argument(
        "--n_predict",
        type=int,
        default=128,
        help="Number of tokens to predict at each step.",
    )
    parser.add_argument(
        "--top_k", type=int, default=40, help="Value for the top-k sampling."
    )
    parser.add_argument(
        "--top_p", type=float, default=0.95, help="Value for the top-p sampling."
    )
    parser.add_argument(
        "--repeat_penalty", type=float, default=1.3, help="Penalty for repeated tokens."
    )
    parser.add_argument(
        "--repeat_last_n",
        type=int,
        default=64,
        help="Number of previous tokens to consider for the repeat penalty.",
    )
    parser.add_argument(
        "--ctx_size",
        type=int,
        default=2048,
        help="Size of the context window for the model.",
    )
    parser.add_argument(
        "--debug",
        dest="debug",
        action="store_true",
        help="launch Flask server in debug mode",
    )
    parser.add_argument(
        "--host", type=str, default="localhost", help="the hostname to listen on"
    )
    parser.add_argument("--port", type=int, default=9600, help="the port to listen on")
    parser.add_argument(
        "--db_path", type=str, default="database.db", help="Database path"
    )
    parser.set_defaults(debug=False)

    args = parser.parse_args()

    chatbot_bindings = GPT4All(decoder_config = {
        'temp': args.temp,
        'n_predict':args.n_predict,
        'top_k':args.top_k,
        'top_p':args.top_p,
        #'color': True,#"## Instruction",
        'repeat_penalty': args.repeat_penalty,
        'repeat_last_n':args.repeat_last_n,
        'ctx_size': args.ctx_size
    })
    chatbot_bindings.open()
    chatbot_model_bindings = Model(
        ggml_model="./models/gpt4all-lora-quantized-ggml.bin", n_ctx=512
    )

    # Old Code
    # GPT4All(decoder_config = {
    #     'temp': args.temp,
    #     'n_predict':args.n_predict,
    #     'top_k':args.top_k,
    #     'top_p':args.top_p,
    #     #'color': True,#"## Instruction",
    #     'repeat_penalty': args.repeat_penalty,
    #     'repeat_last_n':args.repeat_last_n,
    #     'ctx_size': args.ctx_size
    # })
    check_discussion_db(args.db_path)
    bot = Gpt4AllWebUI(chatbot_bindings, app, args.db_path)
    bot = Gpt4AllWebUI(chatbot_model_bindings, app, args.db_path)

    if args.debug:
        app.run(debug=True, host=args.host, port=args.port)
docker-compose.yml
@@ -8,5 +8,6 @@ services:
    volumes:
      - ./data:/srv/data
      - ./data/.nomic:/root/.nomic/
      - ./models:/srv/models
    ports:
      - "4685:4685"
      - "9600:9600"
docs/Linux_Osx_Install.md
@@ -1,5 +1,11 @@
# Installing GPT4All-Webui on Linux or macOS:

- Make sure you have all the dependencies for the requirements:
`python3.11 -m pip install cmake`
`python3.11 -m pip install nproc`; if you have issues with scikit-learn, add the following env var:
`SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL=True`
`python3.11 -m pip install wget`

- Install the requirements:
python3.11 -m pip install -r requirements.txt
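Put together, a sketch of the manual sequence above as one shell session (assuming `python3.11` is on your PATH, and that the scikit-learn variable is only needed when that build fails):

```bash
# Build prerequisites listed above.
python3.11 -m pip install cmake nproc wget
# Work around the deprecated scikit-learn package name if the install fails.
export SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL=True
# Project dependencies.
python3.11 -m pip install -r requirements.txt
```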
@@ -59,5 +59,12 @@ if [ $? -ne 0 ]; then
    exit 1
fi

echo Downloading latest model
wget -P models/ https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin
if [ $? -ne 0 ]; then
    echo "Failed to download model. Please check your wget dependency, internet connection and try again."
    exit 1
fi

echo "Virtual environment created and packages installed successfully."
exit 0
126 install.bat
@@ -1,18 +1,54 @@
@echo off

echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHH .HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHH. ,HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHH.## HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHH#.HHHHH/*,*,*,*,*,*,*,*,***,*,**#HHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHH.*,,***,***,***,***,***,***,*******HHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHH*,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,*,,,,,HHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHH.,,,***,***,***,***,***,***,***,***,***,***/HHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHH*,,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*HHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHH#,***,***,***,***,***,***,***,***,***,***,***,**HHHHHHHHHHHHHHHHH
echo HHHHHHHHHH..HHH,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,*#HHHHHHHHHHHHHHHH
echo HHHHHHH,,,**,/H*,***,***,***,,,*,***,***,***,**,,,**,***,***,***H,,*,***HHHHHHHH
echo HHHHHH.*,,,*,,,,,*,*,*,***#HHHHH.,,*,*,*,*,**/HHHHH.,*,*,*,*,*,*,*,*****HHHHHHHH
echo HHHHHH.*,***,*,*,***,***,.HHHHHHH/**,***,****HHHHHHH.***,***,***,*******HHHHHHHH
echo HHHHHH.,,,,,,,,,,,,,,,,,,,.HHHHH.,,,,,,,,,,,,.HHHHHH,,,,,,,,,,,,,,,,,***HHHHHHHH
echo HHHHHH.,,,,,,/H,,,**,***,***,,,*,***,***,***,**,,,,*,***,***,***H***,***HHHHHHHH
echo HHHHHHH.,,,,*.H,,,,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,***H*,,,,/HHHHHHHHH
echo HHHHHHHHHHHHHHH*,***,***,**,,***,***,***,***,***,***,***,***,**.HHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHH,,,,,,,,*,,#H#,,,,,*,,,*,,,,,,,,*#H*,,,,,,,,,**HHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHH,,*,***,***,**/.HHHHHHHHHHHHH#*,,,*,***,***,*HHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*HHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH**,***,***,***,***,***,***,***,***,***,***,*.HHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH*,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,*HHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH**,***,***,*******/..HHHHHHHHH.#/*,*,,,***,***HHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHH*,*,*,******#HHHHHHHHHHHHHHHHHHHHHHHHHHHH./**,,,.HHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHH.,,*,***.HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH.*#HHHHHHHHHHHH
echo HHHHHHHHHHHHHHH/,,,*.HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHH,,#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHH.HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH

if not exist "./tmp" mkdir "./tmp"

REM Check if Python is installed
set /p="Checking for python..." <nul
where python >nul 2>&1
if %ERRORLEVEL% neq 0 (
    echo Python is not installed. Would you like to install Python? [Y/N]
    set /p choice=
    if /i "%choice%" equ "Y" (
where python >nul 2>&1
if %errorlevel% neq 0 (
    set /p choice=Python is not installed. Would you like to install Python? [Y/N]
    if /i "%choice%" equ "Y" (
        REM Download Python installer
        echo Downloading Python installer...
        powershell -Command "Invoke-WebRequest -Uri 'https://www.python.org/ftp/python/3.10.0/python-3.10.0-amd64.exe' -OutFile 'python.exe'"
        powershell -Command "Invoke-WebRequest -Uri 'https://www.python.org/ftp/python/3.10.0/python-3.10.0-amd64.exe' -OutFile 'tmp/python.exe'"
        REM Install Python
        echo Installing Python...
        python.exe /quiet /norestart
        tmp/python.exe /quiet /norestart
    ) else (
        echo Please install Python and try again.
        pause
@@ -22,20 +58,20 @@ if %ERRORLEVEL% neq 0 (
    echo OK
)


REM Check if pip is installed
set /p="Checking for pip..." <nul
python -m pip >nul 2>&1
if %ERRORLEVEL% neq 0 (
    echo Pip is not installed. Would you like to install pip? [Y/N]
    set /p choice=
    if /i "%choice%" equ "Y" (
if %errorlevel% neq 0 (
    set /p choice=Pip is not installed. Would you like to install pip? [Y/N]
    if /i "%choice%" equ "Y" (
        REM Download get-pip.py
        echo Downloading get-pip.py...
        powershell -Command "Invoke-WebRequest -Uri 'https://bootstrap.pypa.io/get-pip.py' -OutFile 'get-pip.py'"
        powershell -Command "Invoke-WebRequest -Uri 'https://bootstrap.pypa.io/get-pip.py' -OutFile 'tmp/get-pip.py'"
        REM Install pip
        echo Installing pip...
        python get-pip.py
    ) else (
        python tmp/get-pip.py
    ) else (
        echo Please install pip and try again.
        pause
        exit /b 1
@@ -47,10 +83,9 @@ if %ERRORLEVEL% neq 0 (
REM Check if venv module is available
set /p="Checking for venv..." <nul
python -c "import venv" >nul 2>&1
if %ERRORLEVEL% neq 0 (
    echo venv module is not available. Would you like to upgrade Python to the latest version? [Y/N]
    set /p choice=
    if /i "%choice%" equ "Y" (
if %errorlevel% neq 0 (
    set /p choice=venv module is not available. Would you like to upgrade Python to the latest version? [Y/N]
    if /i "%choice%" equ "Y" (
        REM Upgrade Python
        echo Upgrading Python...
        python -m pip install --upgrade pip setuptools wheel
@@ -67,7 +102,7 @@ if %ERRORLEVEL% neq 0 (
REM Create a new virtual environment
set /p="Creating virtual environment ..." <nul
python -m venv env
if %ERRORLEVEL% neq 0 (
if %errorlevel% neq 0 (
    echo Failed to create virtual environment. Please check your Python installation and try again.
    pause
    exit /b 1
@@ -81,8 +116,6 @@ call env\Scripts\activate.bat
echo OK
REM Install the required packages
echo Installing requirements ...
set DS_BUILD_OPS=0
set DS_BUILD_AIO=0
python -m pip install pip --upgrade
python -m pip install -r requirements.txt
if %ERRORLEVEL% neq 0 (
@@ -91,6 +124,57 @@ if %ERRORLEVEL% neq 0 (
    exit /b 1
)

echo Downloading latest model
if not exist models (
    md models
)

if not exist models/gpt4all-lora-quantized-ggml.bin (
    echo.
    choice /C YNB /M "The default model file (gpt4all-lora-quantized-ggml.bin) does not exist. Do you want to download it? Press B to download it with a browser (faster)."
    if errorlevel 3 goto DOWNLOAD_WITH_BROWSER
    if errorlevel 2 goto DOWNLOAD_SKIP
    if errorlevel 1 goto MODEL_DOWNLOAD
) ELSE (
    echo.
    choice /C YNB /M "The default model file (gpt4all-lora-quantized-ggml.bin) already exists. Do you want to replace it? Press B to download it with a browser (faster)."
    if errorlevel 3 goto DOWNLOAD_WITH_BROWSER
    if errorlevel 2 goto DOWNLOAD_SKIP
    if errorlevel 1 goto MODEL_DOWNLOAD
)

:DOWNLOAD_WITH_BROWSER
start https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin
echo Link has been opened with the default web browser, make sure to save it into the models folder. Press any key to continue.
pause
goto :CONTINUE

:MODEL_DOWNLOAD
echo.
echo Downloading latest model...
powershell -Command "Invoke-WebRequest -Uri 'https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin' -OutFile 'models/gpt4all-lora-quantized-ggml.bin'"
if errorlevel 1 (
    echo Failed to download model. Please check your internet connection.
    choice /C YN /M "Do you want to try downloading again?"
    if errorlevel 2 goto DOWNLOAD_SKIP
    if errorlevel 1 goto MODEL_DOWNLOAD
) else (
    echo Model successfully downloaded.
)
goto :CONTINUE

:DOWNLOAD_SKIP
echo.
echo Skipping download of model file...
goto :CONTINUE

:CONTINUE
echo.

echo Cleaning tmp folder
rd /s /q "./tmp"

echo Virtual environment created and packages installed successfully.
echo Everything is set up. Just run run.bat
pause
exit /b 0
46 install.sh
@@ -1,5 +1,42 @@
#!/usr/bin/bash


echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHH .HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHH. ,HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHH.## HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHH#.HHHHH/*,*,*,*,*,*,*,*,***,*,**#HHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHH.*,,***,***,***,***,***,***,*******HHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHH*,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,*,,,,,HHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHH.,,,***,***,***,***,***,***,***,***,***,***/HHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHH*,,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*HHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHH#,***,***,***,***,***,***,***,***,***,***,***,**HHHHHHHHHHHHHHHHH
echo HHHHHHHHHH..HHH,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,*#HHHHHHHHHHHHHHHH
echo HHHHHHH,,,**,/H*,***,***,***,,,*,***,***,***,**,,,**,***,***,***H,,*,***HHHHHHHH
echo HHHHHH.*,,,*,,,,,*,*,*,***#HHHHH.,,*,*,*,*,**/HHHHH.,*,*,*,*,*,*,*,*****HHHHHHHH
echo HHHHHH.*,***,*,*,***,***,.HHHHHHH/**,***,****HHHHHHH.***,***,***,*******HHHHHHHH
echo HHHHHH.,,,,,,,,,,,,,,,,,,,.HHHHH.,,,,,,,,,,,,.HHHHHH,,,,,,,,,,,,,,,,,***HHHHHHHH
echo HHHHHH.,,,,,,/H,,,**,***,***,,,*,***,***,***,**,,,,*,***,***,***H***,***HHHHHHHH
echo HHHHHHH.,,,,*.H,,,,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,***H*,,,,/HHHHHHHHH
echo HHHHHHHHHHHHHHH*,***,***,**,,***,***,***,***,***,***,***,***,**.HHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHH,,,,,,,,*,,#H#,,,,,*,,,*,,,,,,,,*#H*,,,,,,,,,**HHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHH,,*,***,***,**/.HHHHHHHHHHHHH#*,,,*,***,***,*HHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*HHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH**,***,***,***,***,***,***,***,***,***,***,*.HHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH*,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,*HHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH**,***,***,*******/..HHHHHHHHH.#/*,*,,,***,***HHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHH*,*,*,******#HHHHHHHHHHHHHHHHHHHHHHHHHHHH./**,,,.HHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHH.,,*,***.HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH.*#HHHHHHHHHHHH
echo HHHHHHHHHHHHHHH/,,,*.HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHH,,#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHH.HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH


# Install Python 3.11 and pip
echo -n "Checking for python3.11..."
if command -v python3.11 > /dev/null 2>&1; then
@@ -59,5 +59,12 @@ if [ $? -ne 0 ]; then
    exit 1
fi

echo Downloading latest model
wget -P models/ https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin
if [ $? -ne 0 ]; then
    echo "Failed to download model. Please check your wget dependency, internet connection and try again."
    exit 1
fi


echo "Virtual environment created and packages installed successfully."
echo "Everything is set up. Just run run.sh"
exit 0
0 models/.keep Normal file
requirements.txt
@@ -1,4 +1,4 @@
flask
nomic
pytest
pyllamacpp
pyllamacpp
39 run.bat Normal file
@@ -0,0 +1,39 @@
echo off

echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHH .HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHH. ,HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHH.## HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHH#.HHHHH/*,*,*,*,*,*,*,*,***,*,**#HHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHH.*,,***,***,***,***,***,***,*******HHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHH*,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,*,,,,,HHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHH.,,,***,***,***,***,***,***,***,***,***,***/HHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHH*,,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*HHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHH#,***,***,***,***,***,***,***,***,***,***,***,**HHHHHHHHHHHHHHHHH
echo HHHHHHHHHH..HHH,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,*#HHHHHHHHHHHHHHHH
echo HHHHHHH,,,**,/H*,***,***,***,,,*,***,***,***,**,,,**,***,***,***H,,*,***HHHHHHHH
echo HHHHHH.*,,,*,,,,,*,*,*,***#HHHHH.,,*,*,*,*,**/HHHHH.,*,*,*,*,*,*,*,*****HHHHHHHH
echo HHHHHH.*,***,*,*,***,***,.HHHHHHH/**,***,****HHHHHHH.***,***,***,*******HHHHHHHH
echo HHHHHH.,,,,,,,,,,,,,,,,,,,.HHHHH.,,,,,,,,,,,,.HHHHHH,,,,,,,,,,,,,,,,,***HHHHHHHH
echo HHHHHH.,,,,,,/H,,,**,***,***,,,*,***,***,***,**,,,,*,***,***,***H***,***HHHHHHHH
echo HHHHHHH.,,,,*.H,,,,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,***H*,,,,/HHHHHHHHH
echo HHHHHHHHHHHHHHH*,***,***,**,,***,***,***,***,***,***,***,***,**.HHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHH,,,,,,,,*,,#H#,,,,,*,,,*,,,,,,,,*#H*,,,,,,,,,**HHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHH,,*,***,***,**/.HHHHHHHHHHHHH#*,,,*,***,***,*HHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*HHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH**,***,***,***,***,***,***,***,***,***,***,*.HHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH*,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,*HHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH**,***,***,*******/..HHHHHHHHH.#/*,*,,,***,***HHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHH*,*,*,******#HHHHHHHHHHHHHHHHHHHHHHHHHHHH./**,,,.HHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHH.,,*,***.HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH.*#HHHHHHHHHHHH
echo HHHHHHHHHHHHHHH/,,,*.HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHH,,#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHH.HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH

env/Scripts/activate.bat
python app.py
43 run.sh Normal file
@@ -0,0 +1,43 @@
#!/bin/bash

echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHH .HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHH. ,HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHH.## HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHH#.HHHHH/*,*,*,*,*,*,*,*,***,*,**#HHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHH.*,,***,***,***,***,***,***,*******HHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHH*,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,*,,,,,HHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHH.,,,***,***,***,***,***,***,***,***,***,***/HHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHH*,,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*HHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHH#,***,***,***,***,***,***,***,***,***,***,***,**HHHHHHHHHHHHHHHHH
echo HHHHHHHHHH..HHH,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,*#HHHHHHHHHHHHHHHH
echo HHHHHHH,,,**,/H*,***,***,***,,,*,***,***,***,**,,,**,***,***,***H,,*,***HHHHHHHH
echo HHHHHH.*,,,*,,,,,*,*,*,***#HHHHH.,,*,*,*,*,**/HHHHH.,*,*,*,*,*,*,*,*****HHHHHHHH
echo HHHHHH.*,***,*,*,***,***,.HHHHHHH/**,***,****HHHHHHH.***,***,***,*******HHHHHHHH
echo HHHHHH.,,,,,,,,,,,,,,,,,,,.HHHHH.,,,,,,,,,,,,.HHHHHH,,,,,,,,,,,,,,,,,***HHHHHHHH
echo HHHHHH.,,,,,,/H,,,**,***,***,,,*,***,***,***,**,,,,*,***,***,***H***,***HHHHHHHH
echo HHHHHHH.,,,,*.H,,,,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,***H*,,,,/HHHHHHHHH
echo HHHHHHHHHHHHHHH*,***,***,**,,***,***,***,***,***,***,***,***,**.HHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHH,,,,,,,,*,,#H#,,,,,*,,,*,,,,,,,,*#H*,,,,,,,,,**HHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHH,,*,***,***,**/.HHHHHHHHHHHHH#*,,,*,***,***,*HHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*HHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH**,***,***,***,***,***,***,***,***,***,***,*.HHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH*,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,*HHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHH**,***,***,*******/..HHHHHHHHH.#/*,*,,,***,***HHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHH*,*,*,******#HHHHHHHHHHHHHHHHHHHHHHHHHHHH./**,,,.HHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHH.,,*,***.HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH.*#HHHHHHHHHHHH
echo HHHHHHHHHHHHHHH/,,,*.HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHH,,#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHH.HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
echo HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH


# Activate the virtual environment
source env/bin/activate

# Launch the Python application
python app.py
@@ -1,12 +1,15 @@
import pytest

from app import app


@pytest.fixture
def client():
    with app.test_client() as client:
        yield client


def test_homepage(client):
    response = client.get('/')
    response = client.get("/")
    assert response.status_code == 200
    assert b"Welcome to my Flask app" in response.data
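A minimal sketch of running this test locally from the repository root, inside the project's virtual environment (pytest discovers the fixture-based test above automatically):

```bash
# Run the Flask test-client check; -q keeps the output short.
python -m pytest -q
```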
@@ -6,7 +6,7 @@ if /i "%choice%" equ "Y" (
    REM Download Python installer
    echo -n
    set /p="Removing virtual environment..." <nul
    powershell -Command "rm env -y"
    powershell -Command "rm env"
    echo OK
    pause
) else (