Cleared code

This commit is contained in:
Saifeddine ALOUI 2023-08-26 03:07:31 +02:00
parent 115d9aee00
commit 15636fd079
42 changed files with 239 additions and 1543 deletions

View File

@ -0,0 +1,49 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-docker-compose
{
"name": "Existing Docker Compose (Extend)",
// Update the 'dockerComposeFile' list if you have more compose files or use different names.
// The .devcontainer/docker-compose.yml file contains any overrides you need/want to make.
// Order matters: later files in the list override earlier ones.
"dockerComposeFile": [
"../docker-compose.yml",
"docker-compose.yml"
],
// The 'service' property is the name of the service for the container that VS Code should
// use. Update this value and .devcontainer/docker-compose.yml to the real service name.
"service": "webui",
// The optional 'workspaceFolder' property is the path VS Code should open by default when
// connected. This is typically a file mount in .devcontainer/docker-compose.yml
"workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
// Extra tooling layered on top of the compose service's image:
// Python 3.10 (with common dev tools installed and binaries optimized) plus pip.
"features": {
"ghcr.io/devcontainers/features/python:1": {
"installTools": true,
"optimize": true,
"version": "3.10"
},
"ghcr.io/akhildevelops/devcontainer-features/pip:0": {}
}
// Features to add to the dev container. More info: https://containers.dev/features.
// (Template placeholder — superseded by the populated "features" block above.)
// "features": {},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Uncomment the next line if you want start specific services in your Docker Compose config.
// "runServices": [],
// Uncomment the next line if you want to keep your containers running after VS Code shuts down.
// "shutdownAction": "none",
// Uncomment the next line to run commands after the container is created.
// "postCreateCommand": "cat /etc/os-release",
// Configure tool-specific properties.
// "customizations": {},
// Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root.
// "remoteUser": "devcontainer"
}

View File

@ -0,0 +1,26 @@
version: '3.8'
services:
  # Update this to the name of the service you want to work with in your docker-compose.yml file
  webui:
    # Uncomment if you want to override the service's Dockerfile to one in the .devcontainer
    # folder. Note that the path of the Dockerfile and context is relative to the *primary*
    # docker-compose.yml file (the first in the devcontainer.json "dockerComposeFile"
    # array). The sample below assumes your primary file is in the root of your project.
    #
    # build:
    #   context: .
    #   dockerfile: .devcontainer/Dockerfile
    volumes:
      # Update this to wherever you want VS Code to mount the folder of your project
      - ..:/workspaces:cached
    # Uncomment the next four lines if you will use a ptrace-based debugger like C++, Go, and Rust.
    # cap_add:
    #   - SYS_PTRACE
    # security_opt:
    #   - seccomp:unconfined
    # Overrides default command so things don't shut down after the process ends.
    command: /bin/sh -c "while sleep 1000; do :; done"

View File

@ -6,7 +6,7 @@
# license : Apache 2.0
# Description :
# A front end Flask application for llamacpp models.
# The official GPT4All Web ui
# The official LOLLMS Web ui
# Made by the community for the community
######
import yaml

5
app.py
View File

@ -5,7 +5,7 @@
# license : Apache 2.0
# Description :
# A front end Flask application for llamacpp models.
# The official GPT4All Web ui
# The official LOLLMS Web ui
# Made by the community for the community
######
@ -436,6 +436,9 @@ class LoLLMsWebUI(LoLLMsAPPI):
data = request.get_json()
code = data["code"]
ASCIIColors.info("Executing python code:")
ASCIIColors.yellow(code)
def spawn_process(code):
"""Executes Python code and returns the output as JSON."""

1152
convert.py

File diff suppressed because it is too large Load Diff

View File

@ -17,7 +17,7 @@ npm run dev
```
> Note
> To run the developmen environment you need to create copy of the `.env` file and name it either `.env.development` or if that dont work then `.env.dev`. Set `VITE_GPT4ALL_API_BASEURL = /api/ ` in the `.env.development`.
> To run the development environment you need to create a copy of the `.env` file and name it either `.env.development` or, if that doesn't work, `.env.dev`. Set `VITE_LoLLMs_API_BASEURL = /api/ ` in the `.env.development`.
> Run your gpt binding by launching `webui.bat` or bash `webui.sh`.
## Building frontend - UI

View File

@ -21,9 +21,9 @@ This Flask server provides various endpoints to manage and interact with the cha
"ggml-vicuna-13b-4bit-rev1.bin",
"ggml-vicuna-7b-4bit-rev1.bin",
"ggml-vicuna-7b-4bit.bin",
"gpt4all-lora-quantized-ggml.bin",
"gpt4all-lora-quantized.bin",
"gpt4all-lora-unfiltered-quantized.bin"
"lollms-lora-quantized-ggml.bin",
"lollms-lora-quantized.bin",
"lollms-lora-unfiltered-quantized.bin"
]
```
- "/list_personalities_languages": GET request endpoint to list all the available personality languages.
@ -120,7 +120,7 @@ This Flask server provides various endpoints to manage and interact with the cha
```
[
{
"content": "##Instructions:\\nGPT4All is a smart and helpful Assistant built by Nomic-AI. It can discuss with humans and assist them.\n",
"content": "##Instructions:\\nLoLLMs is a smart and helpful Assistant built by Nomic-AI. It can discuss with humans and assist them.\n",
"id": 23,
"parent": 0,
"rank": 0,
@ -128,11 +128,11 @@ This Flask server provides various endpoints to manage and interact with the cha
"type": 1
},
{
"content": "Welcome! I am GPT4All A free and open assistant. What can I do for you today?",
"content": "Welcome! I am LoLLMs A free and open assistant. What can I do for you today?",
"id": 24,
"parent": 23,
"rank": 0,
"sender": "gpt4all",
"sender": "lollms",
"type": 0
},
{
@ -148,7 +148,7 @@ This Flask server provides various endpoints to manage and interact with the cha
"id": 26,
"parent": 25,
"rank": 0,
"sender": "gpt4all",
"sender": "lollms",
"type": 0
}
]
@ -173,7 +173,7 @@ This Flask server provides various endpoints to manage and interact with the cha
"debug": false,
"host": "localhost",
"language": "en-US",
"model": "gpt4all-lora-quantized-ggml.bin",
"model": "lollms-lora-quantized-ggml.bin",
"n_predict": 1024,
"n_threads": 8,
"nb_messages_to_remember": 5,
@ -199,15 +199,15 @@ This Flask server provides various endpoints to manage and interact with the cha
```
{
"personality": {
"ai_message_prefix": "###gpt4all:\n",
"ai_message_prefix": "###lollms:\n",
"anti_prompts": [
"###user",
"### user",
"###gpt4all",
"### gpt4all"
"###lollms",
"### lollms"
],
"assets_list": [
"personalities\\english\\generic\\gpt4all\\assets\\logo.png"
"personalities\\english\\generic\\lollms\\assets\\logo.png"
],
"author": "ParisNeo",
"category": "General",
@ -221,13 +221,13 @@ This Flask server provides various endpoints to manage and interact with the cha
"model_temperature": 0.6,
"model_top_k": 50,
"model_top_p": 0.9,
"name": "gpt4all",
"personality_conditioning": "## Information:\nAssistant's name is gpt4all\nToday's date is {{date}}\n## Instructions:\nYour mission is to assist user to perform various tasks and answer his questions\n",
"name": "lollms",
"personality_conditioning": "## Information:\nAssistant's name is lollms\nToday's date is {{date}}\n## Instructions:\nYour mission is to assist user to perform various tasks and answer his questions\n",
"personality_description": "This personality is a helpful and Kind AI ready to help you solve your problems \n",
"user_message_prefix": "###user:\n",
"user_name": "user",
"version": "1.0.0",
"welcome_message": "Welcome! My name is gpt4all.\nHow can I help you today?\n"
"welcome_message": "Welcome! My name is lollms.\nHow can I help you today?\n"
}
}
```

View File

@ -11,7 +11,7 @@ Welcome to the LOLLMS WebUI tutorial! In this tutorial, we will walk you through
1. Visit the GitHub repository page at [github.com/ParisNeo/lollms-webui](https://github.com/ParisNeo/lollms-webui).
2. Click on the "Latest Release" button.
3. Depending on your platform, download either `webui.bat` for Windows or `webui.sh` for Linux.
4. Choose a folder on your system to install the application launcher. For example, you can create a folder named `gpt4all-webui` in your `ai` directory.
4. Choose a folder on your system to install the application launcher. For example, you can create a folder named `lollms-webui` in your `ai` directory.
5. Run the downloaded script (application launcher). Note: Some antivirus programs or Windows Defender might display a warning due to the tool's reputation. This warning is a false positive caused by the tool being relatively new. You can ignore the warning and proceed with the installation.
6. The installer will no longer prompt you to install the default model. This step will be performed in the UI, making it easier for you.
@ -31,7 +31,7 @@ Welcome to the LOLLMS WebUI tutorial! In this tutorial, we will walk you through
## Starting a Discussion
1. Return to the discussions view.
2. Click the "+" button to create a new discussion.
3. You will see a predefined welcome message based on the selected personality configuration. By default, the GPT4All personality is used, which aims to be helpful.
3. You will see a predefined welcome message based on the selected personality configuration. By default, the LoLLMs personality is used, which aims to be helpful.
4. Enter your query or prompt. For example, you can ask, "Who is Abraham Lincoln?"
5. You can stop the generation at any time by clicking the "Stop Generating" button.

View File

@ -1,10 +1,10 @@
# Personalities and What You Can Do with Them
In this tutorial, we will explore the concept of personalities and their capabilities within the GPT4All webui.
In this tutorial, we will explore the concept of personalities and their capabilities within the LoLLMs webui.
## Introduction
The GPT4All webui utilizes the PyAIPersonality library, which provides a standardized way to define AI simulations and integrate AI personalities with other tools, applications, and data. Before diving into the details, let's familiarize ourselves with some key concepts that will help us understand the inner workings of these tools.
The LoLLMs webui utilizes the PyAIPersonality library, which provides a standardized way to define AI simulations and integrate AI personalities with other tools, applications, and data. Before diving into the details, let's familiarize ourselves with some key concepts that will help us understand the inner workings of these tools.
## Large Language Models (LLMs)
@ -53,13 +53,13 @@ Personality settings are defined in a YAML file, which contains parameters and c
Let's take a closer look at the GPT for Art personality, which specializes in generating descriptions of artwork and even transforming descriptions into actual images using the stable diffusion generator.
To use the GPT for Art personality, you need to follow the custom installation steps outlined in the documentation. Once installed, you can leverage its capabilities through the GPT4All webui.
To use the GPT for Art personality, you need to follow the custom installation steps outlined in the documentation. Once installed, you can leverage its capabilities through the LoLLMs webui.
## Using the GPT4All Webui with the GPT for Art Personality
## Using the LoLLMs Webui with the GPT for Art Personality
To select and apply a personality in the GPT4All webui, follow these steps:
To select and apply a personality in the LoLLMs webui, follow these steps:
1. Open the GPT4All webui and navigate to the "Personality" section.
1. Open the LoLLMs webui and navigate to the "Personality" section.
2. Select the GPT for Art personality from the available options.
3. Start a conversation with the AI agent.
@ -77,9 +77,9 @@ By interacting with the AI agent, users can request specific changes or addition
## Conclusion
In this tutorial, we explored the concept of personalities and their integration within the GPT4All webui. We discussed the hardware and software layers, text processing and tokenization, sampling techniques, iterative text generation, and the customization of personality settings.
In this tutorial, we explored the concept of personalities and their integration within the LoLLMs webui. We discussed the hardware and software layers, text processing and tokenization, sampling techniques, iterative text generation, and the customization of personality settings.
We also delved into the GPT for Art personality, its installation steps, and how to apply it in the GPT4All webui. Through an example discussion with the artbot, we witnessed the collaborative creative process between users and AI.
We also delved into the GPT for Art personality, its installation steps, and how to apply it in the LoLLMs webui. Through an example discussion with the artbot, we witnessed the collaborative creative process between users and AI.
The GPT4All webui, coupled with AI personalities, opens up a world of possibilities for generating personalized and contextually relevant text. With further enhancements and customization, these tools have the potential to revolutionize various industries and creative endeavors.
The LoLLMs webui, coupled with AI personalities, opens up a world of possibilities for generating personalized and contextually relevant text. With further enhancements and customization, these tools have the potential to revolutionize various industries and creative endeavors.

View File

@ -6,19 +6,19 @@ Extensions are little projects built by the community that can be plugged to the
There are many types of extensions:
1 - pipeline extensions
These extensions have no UI, they only intercept the communication between the user and the AI, perform some modifications or operations, then submit them to the discussion to enrich it. For example:
- Net enabled GPT4All (under construction at https://github.com/ParisNeo/Net_enabled-GPT4All-Extension) : An extension that offers a special personality that indicates to the chatbot that whenever the user is asking a question it has no answer to, it should invoke a search function. The extension intercepts this keyword, do the research on the net then mirror it back to the AI. The AI can then use those inputs to formulate an answer.
- Image enabled GPT4All : An extension that uses Blip to convert an image into text that can be interpreted by the AI and used in the discussion.
- Net enabled LoLLMs (under construction at https://github.com/ParisNeo/Net_enabled-LoLLMs-Extension) : An extension that offers a special personality that indicates to the chatbot that whenever the user is asking a question it has no answer to, it should invoke a search function. The extension intercepts this keyword, do the research on the net then mirror it back to the AI. The AI can then use those inputs to formulate an answer.
- Image enabled LoLLMs : An extension that uses Blip to convert an image into text that can be interpreted by the AI and used in the discussion.
The extension should offer a yaml file that describes it to allow the system to integrate it.
```yaml
# This is a gpt4all extension project
# This is a lollms extension project
# Project name : Models tester
# Author : ParisNeo
# Description :
# This extension allows applying the model on a bunch of questions at once and recover answers in a text file
name: GPT4All-Models-Tester-Extension
name: LoLLMs-Models-Tester-Extension
author: ParisNeo
description: |
This extension allows applying the model on a bunch of questions at once and recover answers in a text file

View File

@ -1,4 +1,4 @@
# Installing GPT4All-Webui on Linux or macOS:
# Installing lollms-webui on Linux or macOS:
\- Make sure you have all the dependencies for requirements
`python3.11 -m pip install cmake`

View File

@ -1,4 +1,4 @@
# Using GPT4All-Webui on Linux or macOS:
# Using lollms-webui on Linux or macOS:
To run the Flask server, execute the following command:

View File

@ -10,7 +10,7 @@ Before starting, let me tell you what this project is made for. This project is
This project is under Apache 2.0 licence which is an open source licence that can be used commercially, so people can built things from this and use it in their business.
Also, please don't confuse the GPT4All application built by Nomic AI which is an interesting more professional application that you can find on their website gpt4all.io. It has a great community and I encourage you to check it up.
Also, please don't confuse this project with the GPT4All application built by Nomic AI, which is an interesting, more professional application that you can find on their website gpt4all.io. It has a great community and I encourage you to check it out.
I have built this ui to explore new things and build on top of it. I am not building a company out of this, this is a side project. I just want to give back to the open source community and help make this technology available for all (hence the name).
@ -23,7 +23,7 @@ Before installing this tool you need to install python 3.10 or higher as well as
Now let's cut to the chase. Let's start by installing the tool.
First, go to the github repository page at github.com/ParisNeo/lollms-webui then press the latest release button. Depending on your platform download webui.bat for windows or webui.sh for linux.
We call this file, the application launcher. Make sure you install the launcher in a folder you choose. For example I'll put it in my ai folder at gpt4all-webui.
We call this file, the application launcher. Make sure you install the launcher in a folder you choose. For example I'll put it in my ai folder at lollms-webui.
Now let's run the script.
You may encounter a warning from some antivirus or Windows Defender warning you about the script. It is a false positive caused by the reputation condition in some antiviruses. This means if a program is not used by enough users, some antiviruses consider it dangerous. This is true for this tool as it is new and not enough people are using it as of now, so I have to wait for it to become more accepted.
@ -47,7 +47,7 @@ Notice that applying modifications does not save the configuration, so You need
Now your model is selected and you are ready to start your first discussion.
Let's go back to discussions view. To create a new discussion, press the + button. You should see the personality welcome message. This is a predefined welcome message that you can find in the personality configuration file. by default, we use the GPT4All personality which is conditioned to be a helpful personality. Let's ask it something. For example, who is Abraham Lincoln?
Let's go back to discussions view. To create a new discussion, press the + button. You should see the personality welcome message. This is a predefined welcome message that you can find in the personality configuration file. by default, we use the LoLLMs personality which is conditioned to be a helpful personality. Let's ask it something. For example, who is Abraham Lincoln?
You can stop the generation at any time by pressing the Stop Generating button.

View File

@ -1,5 +1,5 @@
Hi Every one.
This is a new video about Lord of Large language models, formally known as GPT4All webui.
This is a new video about Lord of Large language models, formally known as LoLLMs webui.
In this video, we start by presenting the tool, its philosophy and its main goals. Then, we discuss how to install and use it, we dive deep into its different use cases and how you can harness the power of Large language models in one tool. We will also do some interesting tests and comparisons of models and bindings, and we'll finish with some thoughts about AI, its benefits and dangers.
@ -33,7 +33,7 @@ You will be asked to select a personal folder. This folder will contain:
- the discussion database
Make sure to put this folder to a partition that has enough space as models may be heavy sometimes.
Here I just press enter to choose the default location which is my documents folder.
The first time you run this application, you are prompted to select the binding. bindings are bridge modules that allows lollms to talk to different libraries that can run language models. If you are using a mac, I would recommend using gpt4all binding. If you have a powerful GPU and want to use as many models as possible then you go with ctransformers. The fastest for llama models is the official llama cpp binding. The Pyllamacpp is a tiny stable binding that runs with only llama models but can run on any pc seamlessly. As of today, GPTQ binding can run but it is still in experimental stage. Maybe use it in few weeks. I have a GPU, and want to test many models, so I'll go with CTransformers.
The first time you run this application, you are prompted to select the binding. bindings are bridge modules that allows lollms to talk to different libraries that can run language models. If you are using a mac, I would recommend using lollms binding. If you have a powerful GPU and want to use as many models as possible then you go with ctransformers. The fastest for llama models is the official llama cpp binding. The Pyllamacpp is a tiny stable binding that runs with only llama models but can run on any pc seamlessly. As of today, GPTQ binding can run but it is still in experimental stage. Maybe use it in few weeks. I have a GPU, and want to test many models, so I'll go with CTransformers.
This may take few minutes to complete as it should install many modules.
Let's fastforward.
Once the binding is installed, you need to select a first model. You have the choice between installing a model from the internet or linking to a local model file. This allows you to share models with other tools like Gpt4all or oobabooga's text generation webui.

View File

@ -1,6 +1,6 @@
Hi there. In this video, we are going to talk about the personalities and what you can do with them.
The GPT4All webui uses my PyAIPersonality library under the hood. I have buit this library to create a standard way to define AI simulations and integrate the AI personality with other tools, applications and data. Before starting, I want to explain some concepts to make it easy for you to understand the inner workings of these tools. Let's dive right in.
The LoLLMs webui uses my PyAIPersonality library under the hood. I have built this library to create a standard way to define AI simulations and integrate the AI personality with other tools, applications and data. Before starting, I want to explain some concepts to make it easy for you to understand the inner workings of these tools. Let's dive right in.
Large Language Models (LLMs) are powerful text processing models based on machine learning techniques. As their name suggests, these models are characterized by their substantial size and versatility in handling various text-based tasks. In the context of this work, we focus specifically on text generation models.
@ -124,7 +124,7 @@ As we can see, the model did the requested changes.
Keep in mind that this tool is still in its early stages of development, and there's plenty of room for improvement. One way to enhance its performance is by adjusting the default sampler to an Euler sampler, which can potentially yield even better results. Additionally, you have the flexibility to explore a wide range of models available on Hugging Face repositories. With thousands of models at your disposal, you can experiment and choose the one that aligns best with your specific needs and preferences. By making these adjustments, you can take this tool to new heights and unlock its full potential.
Please note that all the generated images bear a watermark with the GPT4All signature, serving as a clear indication that they were created by AI using the stable diffusion WatermarkEncoder. This step is crucial to promote responsible AI usage and ensure that each generated work is properly identified as an AI creation.
Please note that all the generated images bear a watermark with the LoLLMs signature, serving as a clear indication that they were created by AI using the stable diffusion WatermarkEncoder. This step is crucial to promote responsible AI usage and ensure that each generated work is properly identified as an AI creation.
It's important to emphasize that this tool is intended for appreciating art, fostering creative exploration, and sparking new ideas. It is not meant for malicious purposes or spreading misinformation. We firmly stand against such misuse.
@ -161,7 +161,7 @@ Let's put GPT 4 Internet to the test with a current affairs question: Who is the
As you can observe, the personality performed its intended function flawlessly. It intelligently crafted a well-tailored query, conducted the search seamlessly behind the scenes, and swiftly presented the desired information along with proper source attribution. This showcases the power of leveraging the internet to enhance the AI's capabilities and provide you with accurate and reliable answers.
Finally, to install the personalities, go to the root of your gpt4all webui application and open a terminal. Then type installation/add_personality.bat or add_personality.sh depending on your operating system. you'll be prompted to choose a language, then a category, and finally the personality you want to install. Once installed, your personality will apear in the zoo.
Finally, to install the personalities, go to the root of your lollms webui application and open a terminal. Then type installation/add_personality.bat or add_personality.sh depending on your operating system. You'll be prompted to choose a language, then a category, and finally the personality you want to install. Once installed, your personality will appear in the zoo.
Alright, let's wrap up here to keep the video concise. With over 250 personalities to explore, we've only scratched the surface of what GPT 4 All has to offer. While not all personalities have been fully adapted to the new format, a majority of them are already functional and ready for testing.

View File

@ -1,3 +0,0 @@
@echo off
REM Activate the project's virtual environment, run the binding installer with
REM all forwarded arguments, then pause so the user can read the output.
call ../env/Scripts/activate.bat
if errorlevel 1 (
    echo Failed to activate the virtual environment at ..\env
    pause
    exit /b 1
)
python install_binding.py %*
pause

View File

@ -1,4 +0,0 @@
#!/bin/bash
# Activate the project's virtual environment, run the binding installer with
# all forwarded arguments, then wait for a keypress before exiting.
source ../env/bin/activate || { echo "Failed to activate the virtual environment at ../env" >&2; exit 1; }
python install_binding.py "$@"
# -n 1 makes "any key" accurate (the original waited for Enter); -s hides the
# keystroke, -r avoids backslash mangling.
read -n 1 -s -r -p "Press any key to continue..."
echo

View File

@ -1,55 +0,0 @@
@echo off
REM Interactive installer: clone the PyAIPersonality zoo, let the user pick a
REM language, a category and a personality, then copy that personality into
REM .\personalities\<language>\<category>\<personality>.
REM Delayed expansion is required for the LANGUAGES/CATEGORIES/PERSONALITIES
REM pseudo-arrays indexed with !VAR! inside the for loops below.
setlocal enabledelayedexpansion
REM Clone the repository to a tmp folder
set "REPO_URL=https://github.com/ParisNeo/PyAIPersonality.git"
set "TMP_FOLDER=%temp%\PyAIPersonality"
git clone %REPO_URL% %TMP_FOLDER%
REM List the available languages and prompt user to select one
set "LANGUAGES_FOLDER=%TMP_FOLDER%\personalities_zoo"
set "LANGUAGE_INDEX=0"
for /d %%d in ("%LANGUAGES_FOLDER%\*") do (
set /a "LANGUAGE_INDEX+=1"
set "LANGUAGES[!LANGUAGE_INDEX!]=%%~nxd"
echo !LANGUAGE_INDEX!. %%~nxd
)
REM NOTE(review): the numeric answers below are not validated; an out-of-range
REM entry produces an empty array lookup and a broken path.
set /p "SELECTED_LANGUAGE=Enter the number of the desired language: "
set "LANGUAGE_FOLDER=%LANGUAGES_FOLDER%\!LANGUAGES[%SELECTED_LANGUAGE%]!"
REM List the available categories and prompt user to select one
set "CATEGORIES_FOLDER=%LANGUAGE_FOLDER%"
set "CATEGORY_INDEX=0"
for /d %%d in ("%CATEGORIES_FOLDER%\*") do (
set /a "CATEGORY_INDEX+=1"
set "CATEGORIES[!CATEGORY_INDEX!]=%%~nxd"
echo !CATEGORY_INDEX!. %%~nxd
)
set /p "SELECTED_CATEGORY=Enter the number of the desired category: "
set "CATEGORY_FOLDER=%CATEGORIES_FOLDER%\!CATEGORIES[%SELECTED_CATEGORY%]!"
REM List the available personalities and prompt user to select one
set "PERSONALITIES_FOLDER=%CATEGORY_FOLDER%"
set "PERSONALITY_INDEX=0"
for /d %%d in ("%PERSONALITIES_FOLDER%\*") do (
set /a "PERSONALITY_INDEX+=1"
set "PERSONALITIES[!PERSONALITY_INDEX!]=%%~nxd"
echo !PERSONALITY_INDEX!. %%~nxd
)
set /p "SELECTED_PERSONALITY=Enter the number of the desired personality: "
set "PERSONALITY_FOLDER=%PERSONALITIES_FOLDER%\!PERSONALITIES[%SELECTED_PERSONALITY%]!"
REM Copy the selected personality folder to personalities/language/category folder
set "OUTPUT_FOLDER=%CD%\personalities\!LANGUAGES[%SELECTED_LANGUAGE%]!\!CATEGORIES[%SELECTED_CATEGORY%]!\!PERSONALITIES[%SELECTED_PERSONALITY%]!"
if not exist "%OUTPUT_FOLDER%" mkdir "%OUTPUT_FOLDER%"
xcopy /e /y "%PERSONALITY_FOLDER%" "%OUTPUT_FOLDER%"
REM cleaning
REM NOTE(review): this "./tmp" cleanup looks vestigial — the clone goes to
REM %temp%\PyAIPersonality, which is removed just below; confirm before removing.
if exist "./tmp" (
echo Cleaning tmp folder
rd /s /q "./tmp"
)
REM Remove the tmp folder
rd /s /q "%TMP_FOLDER%"
echo Done
pause

View File

@ -1,53 +0,0 @@
#!/usr/bin/env bash
# Interactive installer: clone the PyAIPersonality zoo, let the user pick a
# language, a category and a personality, then copy that personality into
# ./personalities/<language>/<category>/<personality>.

# Clone the repository to a tmp folder
REPO_URL="https://github.com/ParisNeo/PyAIPersonality.git"
TMP_FOLDER=$(mktemp -d)
# Fail fast if the clone does not succeed, otherwise the menus below would be empty.
if ! git clone "$REPO_URL" "$TMP_FOLDER"; then
    echo "Failed to clone the repository from $REPO_URL" >&2
    rm -rf "$TMP_FOLDER"
    exit 1
fi

# List the available languages and prompt user to select one
LANGUAGES_FOLDER="$TMP_FOLDER/personalities_zoo"
LANGUAGE_INDEX=0
for d in "$LANGUAGES_FOLDER"/*; do
    LANGUAGE_INDEX=$((LANGUAGE_INDEX+1))
    LANGUAGES[$LANGUAGE_INDEX]=$(basename "$d")
    echo "$LANGUAGE_INDEX. ${LANGUAGES[$LANGUAGE_INDEX]}"
done
# NOTE(review): the numeric answers below are not validated; an out-of-range
# entry yields an empty path component.
read -p "Enter the number of the desired language: " SELECTED_LANGUAGE
LANGUAGE_FOLDER="$LANGUAGES_FOLDER/${LANGUAGES[$SELECTED_LANGUAGE]}"

# List the available categories and prompt user to select one
CATEGORIES_FOLDER="$LANGUAGE_FOLDER"
CATEGORY_INDEX=0
for d in "$CATEGORIES_FOLDER"/*; do
    CATEGORY_INDEX=$((CATEGORY_INDEX+1))
    CATEGORIES[$CATEGORY_INDEX]=$(basename "$d")
    echo "$CATEGORY_INDEX. ${CATEGORIES[$CATEGORY_INDEX]}"
done
read -p "Enter the number of the desired category: " SELECTED_CATEGORY
CATEGORY_FOLDER="$CATEGORIES_FOLDER/${CATEGORIES[$SELECTED_CATEGORY]}"

# List the available personalities and prompt user to select one
PERSONALITIES_FOLDER="$CATEGORY_FOLDER"
PERSONALITY_INDEX=0
for d in "$PERSONALITIES_FOLDER"/*; do
    PERSONALITY_INDEX=$((PERSONALITY_INDEX+1))
    PERSONALITIES[$PERSONALITY_INDEX]=$(basename "$d")
    echo "$PERSONALITY_INDEX. ${PERSONALITIES[$PERSONALITY_INDEX]}"
done
read -p "Enter the number of the desired personality: " SELECTED_PERSONALITY
PERSONALITY_FOLDER="$PERSONALITIES_FOLDER/${PERSONALITIES[$SELECTED_PERSONALITY]}"

# Copy the selected personality folder to personalities/language/category folder
OUTPUT_FOLDER="$(pwd)/personalities/${LANGUAGES[$SELECTED_LANGUAGE]}/${CATEGORIES[$SELECTED_CATEGORY]}/${PERSONALITIES[$SELECTED_PERSONALITY]}"
mkdir -p "$OUTPUT_FOLDER"
cp -r "$PERSONALITY_FOLDER/." "$OUTPUT_FOLDER"

# Cleaning
# NOTE(review): this "./tmp" cleanup looks vestigial — the clone goes to the
# mktemp folder removed below, not to ./tmp; confirm before deleting this branch.
if [[ -d "./tmp" ]]; then
    echo "Cleaning tmp folder"
    rm -rf "./tmp"
fi
# Remove the tmp folder
rm -rf "$TMP_FOLDER"

View File

@ -1 +0,0 @@
GPT4All_GPTJ_binding : https://github.com/ParisNeo/GPT4All_GPTJ_binding

View File

@ -1,27 +0,0 @@
@echo off
rem Download every personality from the zoo repository into the local
rem personalities folder, using the project's virtual environment.
rem Set the environment name
set environment_name=env
rem Activate the virtual environment
call %environment_name%\Scripts\activate.bat
rem No directory change is performed: the script path below is relative to the
rem repository root, where this launcher is expected to be run from.
rem Run the Python script
python installations/download_all_personalities.py
rem Deactivate the virtual environment
echo deactivating
call %environment_name%\Scripts\deactivate.bat
rem Remove tmp folder left behind by the download step (tmp/git_clone)
set "folder=tmp"
if exist "%folder%" (
echo Folder exists. Deleting...
rd /s /q "%folder%"
echo Folder deleted.
) else (
echo Folder does not exist.
)

View File

@ -1,57 +0,0 @@
import os
import shutil
from pathlib import Path
def copy_files(source_path, destination_path):
    """Recursively copy the contents of ``source_path`` into ``destination_path``.

    Existing destination files are removed first so the copy always overwrites.
    A failure on an individual file is reported and skipped rather than aborting
    the whole installation.

    Args:
        source_path: ``pathlib.Path`` of the folder to copy from (must support
            the ``/`` operator; plain strings will not work here).
        destination_path: ``pathlib.Path`` of the folder to copy into. Missing
            subdirectories are created as needed.
    """
    for item in os.listdir(source_path):
        source_item = source_path / item
        destination_item = destination_path / item
        if source_item.is_file():
            try:
                # Remove destination file if it already exists
                if destination_item.exists():
                    destination_item.unlink()
                # copy2 preserves metadata (timestamps) in addition to content
                shutil.copy2(str(source_item), str(destination_item))
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # still propagate; per-file errors remain best-effort.
                print(f"Couldn't install personality {item}")
        elif source_item.is_dir():
            # Create destination directory if it does not exist
            destination_item.mkdir(parents=True, exist_ok=True)
            # Recursively copy files in subdirectories
            copy_files(source_item, destination_item)
import subprocess
def clone_and_copy_repository(repo_url):
    """Clone repo_url into a temporary folder, copy its 'personalities_zoo'
    subfolder into the local 'personalities' directory, then clean up.

    Prints a message and returns early on any failure (clone error or
    missing expected subfolder) instead of raising.

    Args:
        repo_url (str): URL of the git repository to clone.
    """
    tmp_folder = Path("tmp/git_clone")
    personalities_folder = Path("personalities")
    subfolder_name = "personalities_zoo"
    # Clone the repository to a temporary folder
    result = subprocess.run(["git", "clone", repo_url, str(tmp_folder)])
    # Check the git exit code as well as the folder: a failed clone may still
    # leave a partial directory behind.
    if result.returncode != 0 or not tmp_folder.exists():
        print("Failed to clone the repository.")
        return
    # Construct the source and destination paths for copying the subfolder
    subfolder_path = tmp_folder / subfolder_name
    if not subfolder_path.exists():
        # Without this guard, copy_files would raise FileNotFoundError.
        print(f"Subfolder '{subfolder_name}' not found in the repository.")
        shutil.rmtree(str(tmp_folder))
        return
    destination_path = Path.cwd() / personalities_folder
    # Make sure the destination exists before copying into it
    destination_path.mkdir(parents=True, exist_ok=True)
    # Copy files and folders recursively
    print(f"copying")
    copy_files(subfolder_path, destination_path)
    # Remove the temporary folder
    shutil.rmtree(str(tmp_folder))
    print("Repository clone and copy completed successfully.")
# Example usage
# NOTE(review): this runs at import time — if this module is ever imported
# rather than executed as a script, consider guarding it with
# `if __name__ == '__main__':`.
repo_url = "https://github.com/ParisNeo/PyAIPersonality.git"
clone_and_copy_repository(repo_url)

View File

@ -1,27 +0,0 @@
#!/bin/bash
# Download all personalities using the project's Python virtual environment,
# then remove the temporary working folder left behind by the download.

# Name of the virtual environment folder
environment_name="env"

# Enter the virtual environment
source "$environment_name/bin/activate"

# Launch the downloader script
python installations/download_all_personalities.py

# Leave the virtual environment
echo "deactivating"
deactivate

# Clean up the temporary working folder, if any
folder="tmp"
if [ -d "$folder" ]; then
    echo "Folder exists. Deleting..."
    rm -r "$folder"
    echo "Folder deleted."
else
    echo "Folder does not exist."
fi

View File

@ -1,60 +0,0 @@
import argparse
import subprocess
import shutil
import yaml
from pathlib import Path
def install_binding(binding_name):
    """Install the binding named binding_name.

    Looks up the binding's repository URL in bindinglist.yaml, clones it into
    ./tmp, installs its requirements, copies its inner folder to ../bindings,
    and creates a matching empty folder under ../models. Prints a message and
    returns early on failure.

    Args:
        binding_name (str): key of the binding in bindinglist.yaml.
    """
    # Load the list of available bindings from bindinglist.yaml
    with open('bindinglist.yaml', 'r') as f:
        binding_list = yaml.safe_load(f)
    # Get the Github repository URL for the selected binding
    try:
        binding_url = binding_list[binding_name]
    except KeyError:
        print(f"Binding '{binding_name}' not found in bindinglist.yaml")
        return
    # Clone the Github repository to a tmp folder, starting from a clean slate
    tmp_folder = Path('tmp')
    if tmp_folder.exists():
        shutil.rmtree(tmp_folder)
    result = subprocess.run(['git', 'clone', binding_url, str(tmp_folder)])
    # Bail out if the clone failed; otherwise folders[0] below would raise.
    if result.returncode != 0 or not tmp_folder.exists():
        print(f"Failed to clone '{binding_url}'")
        return
    # Install the requirements.txt from the cloned project
    requirements_file = tmp_folder / 'requirements.txt'
    subprocess.run(['pip', 'install', '-r', str(requirements_file)])
    # Copy the folder found inside the binding to ../bindings
    folders = [f for f in tmp_folder.iterdir() if f.is_dir() and not f.stem.startswith(".")]
    if not folders:
        # Guard against a repository with no visible subfolder (was IndexError).
        print("No binding folder found in the cloned repository")
        shutil.rmtree(tmp_folder)
        return
    src_folder = folders[0]
    dst_folder = Path('../bindings') / src_folder.stem
    # Fixed typo in user-facing message ("coipying" -> "copying")
    print(f"copying from {src_folder} to {dst_folder}")
    # Delete the destination directory if it already exists
    if dst_folder.exists():
        shutil.rmtree(dst_folder)
    shutil.copytree(src_folder, dst_folder)
    # Create an empty folder in ../models with the same name
    models_folder = Path('../models')
    models_folder.mkdir(exist_ok=True)
    (models_folder / binding_name).mkdir(exist_ok=True, parents=True)
    # Clean up the temporary clone
    if tmp_folder.exists():
        shutil.rmtree(tmp_folder)
if __name__ == '__main__':
    # Load the list of available bindings from bindinglist.yaml
    with open('bindinglist.yaml', 'r') as f:
        binding_list = yaml.safe_load(f)
    # Print the list of available bindings and prompt the user to select one
    print("Available bindings:")
    for binding_id, binding_name in enumerate(binding_list):
        print(f" {binding_id} - {binding_name}")
    # NOTE(review): the entered value is not validated — a non-integer raises
    # ValueError and an out-of-range index raises IndexError.
    binding_id = int(input("Select a binding to install: "))
    install_binding(list(binding_list.keys())[binding_id])

View File

@ -1,7 +0,0 @@
rem Rebuild llama-cpp-python from source with cuBLAS GPU acceleration enabled.
echo this will recompile llapacpp to use your hardware with gpu enabled.
rem Remove the existing prebuilt wheel
pip uninstall llama-cpp-python -y
rem First we need to purge any old installation
pip cache purge
rem Build flags: enable cuBLAS and force a CMake source build
set CMAKE_ARGS=-DLLAMA_CUBLAS=on
set FORCE_CMAKE=1
pip install llama-cpp-python --upgrade

View File

@ -1,7 +0,0 @@
# Rebuild llama-cpp-python from source with cuBLAS GPU acceleration enabled.
echo "this will recompile llapacpp to use your hardware with gpu enabled."
# Remove the existing prebuilt wheel
pip uninstall llama-cpp-python -y
# First we need to purge any old installation
pip cache purge
# Build flags: enable cuBLAS and force a CMake source build
export CMAKE_ARGS="-DLLAMA_CUBLAS=on"
export FORCE_CMAKE=1
pip install llama-cpp-python --upgrade

View File

@ -0,0 +1,27 @@
name: Make programming project
content: |
```@<Language:all_programming_language_options>@
# project: @<Project name>@
# author: @<Author name>@
# description: @<The description of the code>@@<generation_placeholder>@
```
---------
Extra information:
Licence: apache 2.0
Program type: Stand alone.
Documentation:
Make README.md with the following table of contents:
## Description
## Installation
## Usage
## Licence
## Contribute
## Ethical guidelines
Instructions:
Write a user side README.md
Stick to the provided code content and do not invent extra information.
Make sure all sections of the table of contents are present in the file.
----
README.md:
```markdown@<generation_placeholder>@
```

View File

@ -1,6 +1,6 @@
{
"Build a Latex Book": "@<Add some context information to give the AI some context about the book or leave blank if you have no specific idea>@\n```latex\n\\documentclass[12pt]{book}\n\\usepackage{url}\n\\begin{document}\n\\title{@<Title of the book>@}\n\\author{@<Author name>@} % Author\n\\date{\\today} % Date\n\\maketitle\n\\tableofcontents\n\\chapter{Introduction}\n@<generation_placeholder>@\n\\end{document}\n```",
"Simple Book writing":"Once apon a time",
"Simple Book writing":"#@<Title of the book:The advantures of Gandalf and Darth Vador>@\n@<Start the story:Once apon a time in middle earth>@",
"Simple Question Answer":"User:@<What is your question>@\nAssistant:@<generation_placeholder>@",
"Question Answer with conditionning":"Assistant is a highly developed AI capable of answering any question about any subject.\nUser:@<What's your question?>\nAssistant:@<generation_placeholder>@",
"Instruct mode": "Instructions:\n@<Give instructions to the AI>@\nAnswer:@<generation_placeholder>@",

View File

@ -1,2 +1,4 @@
name: Simple Book writing
content: Once apon a time
content: |
# @<Title of the book:The advantures of Gandalf and Darth Vador>@
@<Start the story:Once apon a time in middle earth>@@<generation_placeholder>@

View File

@ -0,0 +1,8 @@
name: Translate text
content: |
```@<Source language:all_language_options>@
@<Text to translate>@
```
```@<Destination language:all_language_options>@
@<generation_placeholder>@
```

View File

@ -1,13 +1,17 @@
tqdm
psutil
flask
flask_socketio
nomic
pytest
pyyaml
markdown
pyllamacpp==2.0.0
gpt4all-j
gpt4all
transformers
pyaipersonality>=0.0.11
git
gevent
gevent-websocket
lollms
langchain
requests
eventlet
websocket-client
GitPython
setuptools
numpy

View File

@ -9,4 +9,4 @@ cd tmp\llama.cpp
git checkout 6c248707f51c8a50f7792e7f7787ec481881db88
cd ../..
echo Converting ...
python tmp\llama.cpp\convert-gpt4all-to-ggml.py "%filename%" "%tokenizer%"
python tmp\llama.cpp\convert-lollms-to-ggml.py "%filename%" "%tokenizer%"

View File

@ -10,4 +10,4 @@ cd tmp\llama.cpp
$(git checkout 6c248707f51c8a50f7792e7f7787ec481881db88)
cd ../..
echo Converting ...
python -c tmp\llama.cpp\convert-gpt4all-to-ggml.py \"$FILENAME\" \"$TOKENIZER\"
python -c tmp\llama.cpp\convert-lollms-to-ggml.py \"$FILENAME\" \"$TOKENIZER\"

View File

@ -43,7 +43,7 @@
<p class="mb-4">Here are the developers who worked on this website:</p>
<ul class="list-disc list-inside mb-4">
<li>@ParisNeo : Creator of the project and Lead developer</li>
<li>@AndriyMulyar : CEO of Nomic-ai who offered to link the project as their official ui for GPT4All</li>
<li>@AndriyMulyar : CEO of Nomic-ai who offered to link the project as their official ui for LoLLMs</li>
<li><a href="https://github.com/ParisNeo/lollms-webui/graphs/contributors" target="_blank" class="text-blue-900 dark:text-blue-600">A number of very talented open-source developers without whom this project wouldn't be as awesome as it is.</a></li>
<li> We also appreciate the support of the users of this tool who have helped us in various ways.</li>
</ul>

View File

@ -2,7 +2,7 @@
<html>
<head>
<meta charset="utf-8">
<title>GPT4All - WEBUI</title>
<title>LoLLMs - WEBUI</title>
<link rel="stylesheet" href="{{ url_for('static', filename='css/utilities.min.css') }}">
<link rel="stylesheet" href="{{ url_for('static', filename='css/tailwind.min.css') }}">
<link rel="stylesheet" href="{{ url_for('static', filename='css/tailwindcss.min.css') }}">

View File

@ -22,7 +22,7 @@
</div>
<div class="mb-4 flex-row">
<label class="font-bold mb-2" for="model">Personalities</label>
<select class="bg-gray-200 dark:bg-gray-700 shadow appearance-none border rounded py-2 px-3 leading-tight focus:outline-none focus:shadow-outline" id="personalities" name="personalities" value="gpt4all_chatbot.yaml">
<select class="bg-gray-200 dark:bg-gray-700 shadow appearance-none border rounded py-2 px-3 leading-tight focus:outline-none focus:shadow-outline" id="personalities" name="personalities" value="lollms_chatbot.yaml">
</select>
</div>
<div class="mb-4 flex-row">

View File

@ -83,7 +83,7 @@ Content-Type: application/json
{
"language": "english",
"category": "generic",
"folder": "gpt4all"
"folder": "lollms"
}
############################################
### Unmount personality

File diff suppressed because one or more lines are too long

2
web/dist/index.html vendored
View File

@ -6,7 +6,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LoLLMS WebUI - Welcome</title>
<script type="module" crossorigin src="/assets/index-8f2cef47.js"></script>
<script type="module" crossorigin src="/assets/index-170c73a7.js"></script>
<link rel="stylesheet" href="/assets/index-9b220b2a.css">
</head>
<body>

View File

@ -1,7 +1,9 @@
<template>
<div class="menu-container">
<button @click.prevent="toggleMenu" class="menu-button bg-blue-500 text-white dark:bg-blue-200 dark:text-gray-800 rounded-full flex items-center justify-center w-6 h-6 border-none cursor-pointer hover:bg-blue-400 w-8 h-8 rounded-full object-fill text-red-700 border-2 active:scale-90 hover:z-20 hover:-translate-y-2 duration-150 border-gray-300 border-secondary cursor-pointer" ref="menuButton">
<i data-feather="command" class="w-5 h-5"></i>
<img v-if="icon && !icon.includes('feather')" :src="command.icon" :alt="command.name" class="w-5 h-5">
<i v-if="icon && icon.includes('feather')" :data-feather="command.icon.split(':')[1]" class="w-5 h-5"></i>
<i data-feather="command" ></i>
</button>
<transition name="slide">
<div v-if="isMenuOpen" class="menu-list flex-grow" :style="menuPosition" ref="menu">
@ -23,10 +25,19 @@ import { nextTick } from 'vue'
import feather from 'feather-icons'
export default {
props: {
icon: {
type:String,
required:false,
value:"feather:command"
},
commands: {
type: Array,
required: true
},
force_position:{
required: false,
value:0
},
execute_cmd: {
type: Function, // The execute_cmd property should be a function
required: false
@ -75,13 +86,26 @@ handleClickOutside(event) {
}
},
positionMenu() {
var isMenuAboveButton;
if (this.$refs.menuButton!=undefined){
console.log(this.force_position)
if(this.force_position==0 || this.force_position==undefined){
console.log("auto position")
const buttonRect = this.$refs.menuButton.getBoundingClientRect();
//const menuRect = this.$refs.menu.getBoundingClientRect();
const windowHeight = window.innerHeight;
const isMenuAboveButton = buttonRect.bottom > windowHeight / 2;
isMenuAboveButton = buttonRect.bottom > windowHeight / 2;
}
else if (this.force_position==1){
console.log("Menu above button")
isMenuAboveButton=true;
}
else{
console.log("Menu below button")
isMenuAboveButton=false;
}
this.menuPosition.top = isMenuAboveButton ? 'auto' : 'calc(100% + 10px)';
this.menuPosition.bottom = isMenuAboveButton ? '100%' : 'auto';
}

View File

@ -1,7 +1,7 @@
<template>
<div
class=" min-w-96 items-start p-4 hover:bg-primary-light rounded-lg mb-2 shadow-lg border-2 cursor-pointer active:scale-95 duration-75 select-none"
@click.stop="toggleSelected" tabindex="-1" :class="selected_computed ? 'border-primary-light' : 'border-transparent'"
class=" min-w-96 items-start p-4 hover:bg-primary-light rounded-lg mb-2 shadow-lg border-2 cursor-pointer select-none"
tabindex="-1" :class="selected_computed ? 'border-primary-light' : 'border-transparent'"
:title="!personality.installed ? 'Not installed' : ''">
<div :class="!personality.installed ? 'opacity-50' : ''">
@ -13,13 +13,19 @@
<h3 class="font-bold font-large text-lg line-clamp-3">
{{ personality.name }}
</h3>
<button type="button" title="Talk"
@click="toggleSelected"
class="hover:text-secondary duration-75 active:scale-90 font-medium rounded-lg text-sm p-2 text-center inline-flex items-center " @click.stop="">
<i data-feather="check" class="w-5"></i>
<span class="sr-only">Select</span>
</button>
<button type="button" title="Talk"
@click="toggleTalk"
class="hover:text-secondary duration-75 active:scale-90 font-medium rounded-lg text-sm p-2 text-center inline-flex items-center " @click.stop="">
<i data-feather="send" class="w-5"></i>
<span class="sr-only">Talk</span>
</button>
<InteractiveMenu :commands="commandsList">
<InteractiveMenu :commands="commandsList" :force_position=2>
</InteractiveMenu>