Merge branch 'main' into personality-zoo

commit e742958905
andzejsp, 2023-05-23 21:45:24 +03:00
5 changed files with 77 additions and 61 deletions

app.py (20 changed lines)

@@ -284,6 +284,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
personalities[language_folder.name][category_folder.name].append(personality_info)
return json.dumps(personalities)
# Settings (data: {"setting_name":<the setting name>,"setting_value":<the setting value>})
def update_setting(self):
data = request.get_json()
@@ -315,11 +316,19 @@ class Gpt4AllWebUI(GPT4AllAPI):
back_language = self.config["personality_language"]
if self.config["personality_language"]!=data['setting_value']:
self.config["personality_language"]=data['setting_value']
cats = self.list_personalities_categories()
personalities_categories_dir = Path(f'./personalities/{self.config["personality_language"]}') # path to the personalities folder for the selected language
cats = [f.stem for f in personalities_categories_dir.iterdir() if f.is_dir()]
if len(cats)>0:
back_category = self.config["personality_category"]
self.config["personality_category"]=cats[0]
pers = json.loads(self.list_personalities().data.decode("utf8"))
try:
personalities_dir = Path(f'./personalities/{self.config["personality_language"]}/{self.config["personality_category"]}') # path to the selected personality category folder
pers = [f.stem for f in personalities_dir.iterdir() if f.is_dir()]
except Exception as ex:
pers=[]
if self.config["debug"]:
print(f"No personalities found. Using default one {ex}")
if len(pers)>0:
self.config["personality"]=pers[0]
personality_fn = f"personalities/{self.config['personality_language']}/{self.config['personality_category']}/{self.config['personality']}"
@@ -363,10 +372,11 @@ class Gpt4AllWebUI(GPT4AllAPI):
elif setting_name== "backend":
if self.config['backend']!= data['setting_value']:
print("New backend selected")
print(f"New backend selected : {data['setting_value']}")
self.config["backend"]=data['setting_value']
try:
self.backend = self.process.load_backend(self.config["backend"])
self.backend = self.process.load_backend(self.config["backend"], install=True)
except Exception as ex:
print("Couldn't build backend")
return jsonify({'setting_name': data['setting_name'], "status":False, 'error':str(ex)})
@@ -383,7 +393,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
if self.config["debug"]:
print(f"Configuration {data['setting_name']} set to {data['setting_value']}")
print("Configuration updated")
print(f"Configuration {data['setting_name']} updated")
# Tell that the setting was changed
return jsonify({'setting_name': data['setting_name'], "status":True})
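For reference, the settings payload described in the comment above ({"setting_name": ..., "setting_value": ...}) can be exercised with a small client sketch. The "/update_setting" route, host, and port are assumptions; none of them appear in this diff.

import requests

# Hypothetical client call; route name, host, port and the setting values are assumptions.
resp = requests.post(
    "http://localhost:9600/update_setting",
    json={"setting_name": "backend", "setting_value": "llama_cpp"},  # example values only
)
print(resp.json())  # on success, something like: {"setting_name": "backend", "status": True}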

Models catalog (YAML)

@@ -1,11 +1,61 @@
- bestMPT: 'true'
description: Current best non-commercially licensable chat model based on MPT and
trained by Mosaic ML.
filename: ggml-mpt-7b-chat.bin
filesize: '4854401050'
isDefault: 'true'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 756249d3d6abe23bde3b1ae272628640
owner: Nomic AI
requires: 2.4.1
server: https://gpt4all.io/models/
- description: A commercially licensable instruct model based on MPT and trained
by Mosaic ML.
filename: ggml-mpt-7b-instruct.bin
filesize: '4854401028'
license: Apache 2.0
owner_link: https://gpt4all.io
md5sum: 1cfa4958f489f0a0d1ffdf6b37322809
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A commercially licensable model base pre-trained by Mosaic ML.
filename: ggml-mpt-7b-base.bin
filesize: '4854401028'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 120c32a51d020066288df045ef5d52b9
owner: Nomic AI
requires: 2.4.1
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 7b and trained by
teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
filename: ggml-vicuna-7b-1.1-q4_2.bin
filesize: '4212859520'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 29119f8fa11712704c6b22ac5ab792ea
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 7b and trained by
Microsoft and Peking University.
filename: ggml-wizardLM-7B.q4_2.bin
filesize: '4212864640'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 99e6d129745a3f1fb1121abed747b05a
owner: Nomic AI
server: https://gpt4all.io/models/
- md5sum: 679fc463f01388ea2d339664af0a0836
filename: ggml-wizard-13b-uncensored.bin
server: https://gpt4all.io/models/
filesize: 8136777088
owner: Nomic AI
owner_link: https://gpt4all.io
description: A non-commercially licensable model based on Wizard Vicuna 13b.
- md5sum: d5eafd5b0bd0d615cfd5fd763f642dfe
filename: ggml-nous-gpt4-vicuna-13b.bin
server: https://gpt4all.io/models/
filesize: 8136777088
description: A non-commercially licensable model based on Vicuna 13b, fine-tuned on ~180,000 instructions, trained by Nous Research.
owner: Nomic AI
@@ -31,18 +81,6 @@
md5sum: 91f886b68fbce697e9a3cd501951e455
owner: Nomic AI
server: https://gpt4all.io/models/
- bestMPT: 'true'
description: Current best non-commercially licensable chat model based on MPT and
trained by Mosaic ML.
filename: ggml-mpt-7b-chat.bin
filesize: '4854401050'
isDefault: 'true'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 756249d3d6abe23bde3b1ae272628640
owner: Nomic AI
requires: 2.4.1
server: https://gpt4all.io/models/
- description: A commercially licensable model based on GPT-J and trained by Nomic
AI on the v2 GPT4All dataset.
filename: ggml-gpt4all-j-v1.2-jazzy.bin
@@ -70,15 +108,6 @@
md5sum: 5b5a3f9b858d33b29b52b89692415595
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 7b and trained by
teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
filename: ggml-vicuna-7b-1.1-q4_2.bin
filesize: '4212859520'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 29119f8fa11712704c6b22ac5ab792ea
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 13b and trained
by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
filename: ggml-vicuna-13b-1.1-q4_2.bin
@@ -88,15 +117,6 @@
md5sum: 95999b7b0699e2070af63bf5d34101a8
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 7b and trained by
Microsoft and Peking University.
filename: ggml-wizardLM-7B.q4_2.bin
filesize: '4212864640'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 99e6d129745a3f1fb1121abed747b05a
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Llama 13b and RLHF trained
by Stable AI.
filename: ggml-stable-vicuna-13B.q4_2.bin
@@ -106,15 +126,6 @@
md5sum: 6cb4ee297537c9133bddab9692879de0
owner: Nomic AI
server: https://gpt4all.io/models/
- description: A commercially licensable model base pre-trained by Mosaic ML.
filename: ggml-mpt-7b-base.bin
filesize: '4854401028'
license: Non commercial
owner_link: https://gpt4all.io
md5sum: 120c32a51d020066288df045ef5d52b9
owner: Nomic AI
requires: 2.4.1
server: https://gpt4all.io/models/
- description: A non-commercially licensable model based on Vicuna 13b, fine-tuned
on ~180,000 instructions, trained by Nous Research.
filename: ggml-nous-gpt4-vicuna-13b.bin
@@ -124,12 +135,4 @@
md5sum: d5eafd5b0bd0d615cfd5fd763f642dfe
owner: gpt4all.io
server: https://gpt4all.io/models/
- description: A commercially licensable instruct model based on MPT and trained
by Mosaic ML.
filename: ggml-mpt-7b-instruct.bin
filesize: '4854401028'
license: Apache 2.0
owner_link: https://gpt4all.io
md5sum: 1cfa4958f489f0a0d1ffdf6b37322809
owner: Nomic AI
server: https://gpt4all.io/models/
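Every entry above shares the same fields (filename, filesize, md5sum, server, owner, license, ...). As a rough illustration of how such a catalog could be consumed, the sketch below loads the YAML and checks already-downloaded files against their md5sum; the models.yaml and models/ paths are assumptions, not taken from this commit.

import hashlib
from pathlib import Path

import yaml

def verify_downloaded_models(catalog_path, models_dir):
    # The catalog is a YAML list of dictionaries, as in the entries above.
    entries = yaml.safe_load(Path(catalog_path).read_text())
    for entry in entries:
        local_file = Path(models_dir) / entry["filename"]
        if not local_file.exists():
            continue
        # Hash the file in chunks; these models are several gigabytes.
        md5 = hashlib.md5()
        with local_file.open("rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                md5.update(chunk)
        status = "OK" if md5.hexdigest() == entry["md5sum"] else "MISMATCH"
        print(f"{entry['filename']}: {status}")

# Example usage with assumed paths:
# verify_downloaded_models("models.yaml", "models")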

Model process / API module (Python)

@@ -108,6 +108,10 @@ class ModelProcess:
}
def load_backend(self, backend_name:str, install=False):
if install:
print(f"Loading backend {backend_name} install ON")
else:
print(f"Loading backend : {backend_name} install is off")
backend_path = Path("backends")/backend_name
if install:
# first find out if there is a requirements.txt file
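One plausible continuation of that install branch (a sketch under assumptions, not necessarily how the repository implements it) is to install the backend's requirements.txt into the running environment when the file exists:

import subprocess
import sys
from pathlib import Path

def install_backend_requirements(backend_path: Path):
    # Hypothetical helper: if the backend ships a requirements.txt,
    # install it with the same interpreter that runs the server.
    requirements = backend_path / "requirements.txt"
    if requirements.exists():
        subprocess.run(
            [sys.executable, "-m", "pip", "install", "-r", str(requirements)],
            check=True,
        )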
@@ -288,6 +292,7 @@ class ModelProcess:
def _generate(self, prompt, n_predict=50, callback=None):
if self.model is not None:
print(">Generating message")
self.id = self.id
if self.config["override_personality_model_parameters"]:
output = self.model.generate(
@@ -712,8 +717,7 @@ class GPT4AllAPI():
# prepare query and reception
self.discussion_messages, self.current_message = self.prepare_query(message_id)
self.prepare_reception()
self.generating = True
print(">Generating message")
self.generating = True
self.process.generate(self.discussion_messages, self.current_message, message_id, n_predict = self.config['n_predict'])
self.process.started_queue.get()
while(self.process.is_generating.value): # Simulating other commands being issued
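The handoff above relies on shared multiprocessing primitives: started_queue signals that generation began, and is_generating is polled until the worker finishes. A stripped-down sketch of that pattern, reusing those two names from the diff but with everything else purely illustrative:

import time
from multiprocessing import Process, Queue, Value

def worker(started_queue, is_generating):
    is_generating.value = 1
    started_queue.put(True)   # tell the caller that generation has started
    time.sleep(1)             # stand-in for the actual token-generation loop
    is_generating.value = 0

if __name__ == "__main__":
    started_queue = Queue()
    is_generating = Value("i", 0)
    Process(target=worker, args=(started_queue, is_generating)).start()
    started_queue.get()            # block until the worker reports it started
    while is_generating.value:     # poll, as in the loop above
        time.sleep(0.1)
    print("generation finished")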

Personality installer script (bash)

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Clone the repository to a tmp folder
REPO_URL="https://github.com/ParisNeo/PyAIPersonality.git"
@@ -39,8 +39,7 @@ read -p "Enter the number of the desired personality: " SELECTED_PERSONALITY
PERSONALITY_FOLDER="$PERSONALITIES_FOLDER/${PERSONALITIES[$SELECTED_PERSONALITY]}"
# Copy the selected personality folder to personalities/language/category folder
CORRECTED_PATH="$(pwd)/.."
OUTPUT_FOLDER="$CORRECTED_PATH/personalities/${LANGUAGES[$SELECTED_LANGUAGE]}/${CATEGORIES[$SELECTED_CATEGORY]}/${PERSONALITIES[$SELECTED_PERSONALITY]}"
OUTPUT_FOLDER="$(pwd)/personalities/${LANGUAGES[$SELECTED_LANGUAGE]}/${CATEGORIES[$SELECTED_CATEGORY]}/${PERSONALITIES[$SELECTED_PERSONALITY]}"
mkdir -p "$OUTPUT_FOLDER"
cp -r "$PERSONALITY_FOLDER/." "$OUTPUT_FOLDER"
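The corrected OUTPUT_FOLDER now resolves under the current working directory rather than its parent. In Python terms, the destination and copy step amount to roughly the following sketch; the selection values and the cloned-repository path are placeholders, not taken from the script.

import shutil
from pathlib import Path

# Placeholder selections; the script obtains these from its interactive menus.
language, category, personality = "english", "generic", "gpt4all"

# New behaviour: rooted at $(pwd) rather than "$(pwd)/..".
output_folder = Path.cwd() / "personalities" / language / category / personality
output_folder.mkdir(parents=True, exist_ok=True)

# Equivalent of: cp -r "$PERSONALITY_FOLDER/." "$OUTPUT_FOLDER"
personality_folder = Path("/tmp/PyAIPersonality") / "personalities" / language / category / personality  # assumed clone location
shutil.copytree(personality_folder, output_folder, dirs_exist_ok=True)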

Web UI launcher script (bash)

@@ -1,4 +1,4 @@
#!/usr/bin/bash
#!/usr/bin/env bash
echo "HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH"
echo "HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH"
echo "HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH"
@@ -52,9 +52,9 @@ if ping -q -c 1 google.com >/dev/null 2>&1; then
fi
fi
# Check if repository exists
# Check if repository exists
if [[ -d .git ]] ;then
echo Pulling latest changes
echo Pulling latest changes
git pull origin main
else
if [[ -d GPT4All ]] ;then