mirror of
https://github.com/ParisNeo/lollms-webui.git
synced 2024-12-18 20:17:50 +00:00
upgraded documentation
This commit is contained in:
parent
20f8476fb8
commit
bccd0c903b
21
README.md
21
README.md
@ -41,7 +41,7 @@ It's worth noting that the model has recently been launched, and it's expected t
|
||||
|
||||
# Installation and running
|
||||
|
||||
Make sure that your CPU supports `AVX2` instruction set. Without it, this application won't run out of the box. To check your CPU features, please visit the website of your CPU manufacturer for more information and look for `Instruction set extension: AVX2`.
|
||||
Make sure that your CPU supports `AVX2` instruction set. Without it, this application won't run out of the box (for the pyllamacpp backend). To check your CPU features, please visit the website of your CPU manufacturer for more information and look for `Instruction set extension: AVX2`.
|
||||
> **Note**
|
||||
>
|
||||
>Default model `gpt4all-lora-quantized-ggml.bin` is roughly 4GB in size.
|
||||
@ -60,6 +60,7 @@ Make sure that your CPU supports `AVX2` instruction set. Without it, this applic
|
||||
> **Note**
|
||||
> During installation, it may ask you to download a model. Feel free to accept or to download your own models depending on the backends you are using.
|
||||
|
||||
|
||||
Once installed, you can run the app by using `webui.bat` or `webui.sh`. The script will check for any new updates
|
||||
|
||||
[If you want to use a more advanced install procedure, please click here](docs/usage/AdvancedInstallInstructions.md)
|
||||
@ -144,14 +145,18 @@ Just download the model into the `models/<backend name>` folder and start using
|
||||
|
||||
You can find hundreds of personalities in my personal [Personalities repository](https://github.com/ParisNeo/PyAIPersonality). This new personalities format can be used by any third-party application: it defines a simple structure and format for describing personalities. The format is evolving, and new fields and assets will be added in the future, such as a personality voice or a 3D animated character with prebaked motions that should make the AI feel more alive. The format is designed to remain compatible with old versions while adding new capabilities in new versions, making it ideal as a personality definition format.
|
||||
|
||||
## Personality install
|
||||
If you are on Windows, you can install new personalities directly using the `add_personality.bat` script:
|
||||
```bash
|
||||
add_personality.bat
|
||||
```
|
||||
### How to Install Personalities from the Zoo
|
||||
|
||||
1. Navigate to the root directory of your repository.
|
||||
2. Run either `installations/add_personality.bat` or `installations/add_personality.sh`, depending on your operating system.
|
||||
3. Select the desired language, category, and personality from the provided options.
|
||||
4. The selected personality will be added to the list of available options.
|
||||
5. Choose the current personality:
|
||||
- Option 1: Use the UI by going to "Settings" and selecting "Personalities".
|
||||
- Option 2: Update the configuration file `configs/default_local.yaml` with the appropriate language, category, and personality name.
|
||||
|
||||
Note: Ensure that you have the necessary permissions and dependencies installed before performing the above steps.
|
||||
|
||||
```bash
|
||||
bash add_personality.sh
|
||||
```
|
||||
|
||||
Please don't forget to take time and give a Star if you like the project. This helps the visibility of the project.
|
||||
|
100
backends/gptq/__init__.py
Normal file
100
backends/gptq/__init__.py
Normal file
@ -0,0 +1,100 @@
|
||||
######
|
||||
# Project : GPT4ALL-UI
|
||||
# File : backend.py
|
||||
# Author : ParisNeo with the help of the community
|
||||
# Supported by Nomic-AI
|
||||
# Licence : Apache 2.0
|
||||
# Description :
|
||||
# This is an interface class for GPT4All-ui backends.
|
||||
######
|
||||
from pathlib import Path
|
||||
from typing import Callable
|
||||
from transformers import AutoTokenizer, TextGenerationPipeline
|
||||
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
|
||||
from pyGpt4All.backend import GPTBackend
|
||||
import torch
|
||||
import yaml
|
||||
|
||||
__author__ = "parisneo"
|
||||
__github__ = "https://github.com/ParisNeo/GPTQ_backend"
|
||||
__copyright__ = "Copyright 2023, "
|
||||
__license__ = "Apache 2.0"
|
||||
|
||||
backend_name = "GPTQ"
|
||||
|
||||
class GPTQ(GPTBackend):
    """GPT4All-ui backend that serves GPTQ-quantized models through AutoGPTQ."""
    # Accept any file name: GPTQ models are referenced by a directory or
    # Hugging Face repo id, not by a specific on-disk extension.
    file_extension = '*'

    def __init__(self, config: dict) -> None:
        """Builds a GPTQ backend.

        Args:
            config (dict): The application configuration. ``config["model"]``
                is used as the model directory / Hugging Face repo id.
        """
        super().__init__(config, False)

        self.model_dir = f'{config["model"]}'

        # load quantized model, currently only support cpu or single gpu
        self.model = AutoGPTQForCausalLM.from_pretrained(self.model_dir, BaseQuantizeConfig())
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_dir, use_fast=True)

    def stop_generation(self):
        """Asks the underlying model to stop the current generation.

        NOTE(review): ``_grab_text_callback`` is a private attribute of the
        wrapped model — confirm it actually exists for AutoGPTQ models.
        """
        self.model._grab_text_callback()

    def generate(self,
                 prompt: str,
                 n_predict: int = 128,
                 new_text_callback: Callable[[str], None] = bool,
                 verbose: bool = False,
                 **gpt_params):
        """Generates text out of a prompt.

        Args:
            prompt (str): The prompt to use for generation.
            n_predict (int, optional): Maximum number of new tokens to predict.
                Defaults to 128.
            new_text_callback (Callable[[str], None], optional): A callback
                function that is called with the generated text.
            verbose (bool, optional): If true, print extra information about
                the generation process. Defaults to False.
        """
        try:
            # Assumes a CUDA device is available — TODO(review): fall back to
            # CPU when torch.cuda.is_available() is False.
            inputs = self.tokenizer(prompt, return_tensors="pt").to("cuda:0")
            # Fix: honor n_predict instead of transformers' default length cap.
            output_ids = self.model.generate(**inputs, max_new_tokens=n_predict)
            new_text_callback(self.tokenizer.decode(output_ids[0]))
        except Exception as ex:
            # Best-effort generation: report the failure without crashing the UI.
            print(ex)

    @staticmethod
    def list_models(config: dict):
        """Lists a few known model identifiers for this backend."""
        return [
            "EleutherAI/gpt-j-6b",
            # Fix: a missing comma after this entry used to silently merge two
            # names into "opt-125m-4bitTheBloke/medalpaca-13B-GPTQ-4bit".
            "opt-125m-4bit",
            "TheBloke/medalpaca-13B-GPTQ-4bit",
            "TheBloke/stable-vicuna-13B-GPTQ",
        ]

    @staticmethod
    def get_available_models():
        """Reads models.yaml next to this file and returns the parsed catalog."""
        # Resolve the catalog file relative to this backend's directory.
        backend_path = Path(__file__).parent
        file_path = backend_path / "models.yaml"

        with open(file_path, 'r') as file:
            yaml_data = yaml.safe_load(file)

        return yaml_data
|
72
backends/gptq/models.yaml
Normal file
72
backends/gptq/models.yaml
Normal file
@ -0,0 +1,72 @@
|
||||
# Model catalog for the GPTQ backend, loaded by GPTQ.get_available_models().
# Each entry describes one downloadable model:
#   filename : file name to download / look up in the models folder
#   filesize : size in bytes (kept as a quoted string)
#   md5sum   : checksum used to verify the downloaded file
# Optional keys: isDefault, bestGPTJ / bestLlama / bestMPT ('true' flags),
# and requires (minimum supported format version).
# NOTE(review): these entries reference ggml .bin files, which an AutoGPTQ
# backend cannot load — confirm this catalog is intentional for GPTQ.
- bestGPTJ: 'true'
  description: Current best commercially licensable model based on GPT-J and trained
    by Nomic AI on the latest curated GPT4All dataset.
  filename: ggml-gpt4all-j-v1.3-groovy.bin
  filesize: '3785248281'
  isDefault: 'true'
  md5sum: 81a09a0ddf89690372fc296ff7f625af
- bestLlama: 'true'
  description: Current best non-commercially licensable model based on Llama 13b and
    trained by Nomic AI on the latest curated GPT4All dataset.
  filename: ggml-gpt4all-l13b-snoozy.bin
  filesize: '8136770688'
  md5sum: 91f886b68fbce697e9a3cd501951e455
- bestMPT: 'true'
  description: Current best non-commercially licensable chat model based on MPT and
    trained by Mosaic ML.
  filename: ggml-mpt-7b-chat.bin
  filesize: '4854401050'
  isDefault: 'true'
  md5sum: 756249d3d6abe23bde3b1ae272628640
  requires: 2.4.1
- description: A commercially licensable model based on GPT-J and trained by Nomic
    AI on the v2 GPT4All dataset.
  filename: ggml-gpt4all-j-v1.2-jazzy.bin
  filesize: '3785248281'
  md5sum: 879344aaa9d62fdccbda0be7a09e7976
- description: A commercially licensable model based on GPT-J and trained by Nomic
    AI on the v1 GPT4All dataset.
  filename: ggml-gpt4all-j-v1.1-breezy.bin
  filesize: '3785248281'
  md5sum: 61d48a82cb188cceb14ebb8082bfec37
- description: A commercially licensable model based on GPT-J and trained by Nomic
    AI on the v0 GPT4All dataset.
  filename: ggml-gpt4all-j.bin
  filesize: '3785248281'
  md5sum: 5b5a3f9b858d33b29b52b89692415595
- description: A non-commercially licensable model based on Llama 7b and trained by
    teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
  filename: ggml-vicuna-7b-1.1-q4_2.bin
  filesize: '4212859520'
  md5sum: 29119f8fa11712704c6b22ac5ab792ea
- description: A non-commercially licensable model based on Llama 13b and trained
    by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
  filename: ggml-vicuna-13b-1.1-q4_2.bin
  filesize: '8136770688'
  md5sum: 95999b7b0699e2070af63bf5d34101a8
- description: A non-commercially licensable model based on Llama 7b and trained by
    Microsoft and Peking University.
  filename: ggml-wizardLM-7B.q4_2.bin
  filesize: '4212864640'
  md5sum: 99e6d129745a3f1fb1121abed747b05a
- description: A non-commercially licensable model based on Llama 13b and RLHF trained
    by Stable AI.
  filename: ggml-stable-vicuna-13B.q4_2.bin
  filesize: '8136777088'
  md5sum: 6cb4ee297537c9133bddab9692879de0
- description: A commercially licensable model base pre-trained by Mosaic ML.
  filename: ggml-mpt-7b-base.bin
  filesize: '4854401028'
  md5sum: 120c32a51d020066288df045ef5d52b9
  requires: 2.4.1
- description: A non-commercially licensable model based on Vicuna 13b, fine-tuned
    on ~180,000 instructions, trained by Nous Research.
  filename: ggml-nous-gpt4-vicuna-13b.bin
  filesize: '8136777088'
  md5sum: d5eafd5b0bd0d615cfd5fd763f642dfe
- description: A commericially licensable instruct model based on MPT and trained
    by Mosaic ML.
  filename: ggml-mpt-7b-instruct.bin
  filesize: '4854401028'
  md5sum: 1cfa4958f489f0a0d1ffdf6b37322809
  requires: 2.4.1
|
87
backends/hugging_face/__init__.py
Normal file
87
backends/hugging_face/__init__.py
Normal file
@ -0,0 +1,87 @@
|
||||
######
|
||||
# Project : GPT4ALL-UI
|
||||
# File : backend.py
|
||||
# Author : ParisNeo with the help of the community
|
||||
# Supported by Nomic-AI
|
||||
# Licence : Apache 2.0
|
||||
# Description :
|
||||
# This is an interface class for GPT4All-ui backends.
|
||||
######
|
||||
from pathlib import Path
|
||||
from typing import Callable
|
||||
from accelerate import init_empty_weights
|
||||
from accelerate import load_checkpoint_and_dispatch
|
||||
from transformers import AutoTokenizer
|
||||
from transformers import AutoConfig, AutoModelForCausalLM
|
||||
from pyGpt4All.backend import GPTBackend
|
||||
import torch
|
||||
|
||||
__author__ = "parisneo"
|
||||
__github__ = "https://github.com/ParisNeo/GPTQ_backend"
|
||||
__copyright__ = "Copyright 2023, "
|
||||
__license__ = "Apache 2.0"
|
||||
|
||||
backend_name = "HuggingFace"
|
||||
|
||||
class HuggingFace(GPTBackend):
    """GPT4All-ui backend that serves Hugging Face transformers models in 8-bit."""
    # Accept any file name: models are referenced by Hugging Face repo id,
    # not by a specific on-disk extension.
    file_extension = '*'

    def __init__(self, config: dict) -> None:
        """Builds a HuggingFace backend.

        Args:
            config (dict): The application configuration. ``config["model"]``
                is the Hugging Face repo id / local model directory.
        """
        super().__init__(config, False)

        # load quantized model, currently only support cpu or single gpu.
        # Fix: dropped the unused ``config_path = AutoConfig.from_pretrained(...)``
        # call — its result was never used, and from_pretrained below loads the
        # model configuration itself.
        self.tokenizer = AutoTokenizer.from_pretrained(config["model"])
        self.model = AutoModelForCausalLM.from_pretrained(config["model"], load_in_8bit=True, device_map='auto')

    def stop_generation(self):
        """Asks the underlying model to stop the current generation.

        NOTE(review): ``_grab_text_callback`` is a private attribute of the
        wrapped model — confirm it exists for transformers models.
        """
        self.model._grab_text_callback()

    def generate(self,
                 prompt: str,
                 n_predict: int = 128,
                 new_text_callback: Callable[[str], None] = bool,
                 verbose: bool = False,
                 **gpt_params):
        """Generates text out of a prompt.

        Args:
            prompt (str): The prompt to use for generation.
            n_predict (int, optional): Maximum number of new tokens to predict.
                Defaults to 128.
            new_text_callback (Callable[[str], None], optional): A callback
                function that is called with the generated text.
            verbose (bool, optional): If true, print extra information about
                the generation process. Defaults to False.
        """
        try:
            # Assumes a CUDA device is available — TODO(review): fall back to
            # CPU when torch.cuda.is_available() is False.
            inputs = self.tokenizer(prompt, return_tensors="pt").to("cuda:0")
            # Fix: honor n_predict instead of transformers' default length cap.
            output_ids = self.model.generate(**inputs, max_new_tokens=n_predict)
            new_text_callback(self.tokenizer.decode(output_ids[0]))
        except Exception as ex:
            # Best-effort generation: report the failure without crashing the UI.
            print(ex)

    @staticmethod
    def list_models(config: dict):
        """Lists a few known model identifiers for this backend."""
        return [
            "EleutherAI/gpt-j-6B"
        ]
|
72
backends/hugging_face/models.yaml
Normal file
72
backends/hugging_face/models.yaml
Normal file
@ -0,0 +1,72 @@
|
||||
# Model catalog for the HuggingFace backend.
# Each entry describes one downloadable model:
#   filename : file name to download / look up in the models folder
#   filesize : size in bytes (kept as a quoted string)
#   md5sum   : checksum used to verify the downloaded file
# Optional keys: isDefault, bestGPTJ / bestLlama / bestMPT ('true' flags),
# and requires (minimum supported format version).
# NOTE(review): the HuggingFace backend class in this commit defines no
# get_available_models(), so nothing reads this file yet; the ggml .bin
# entries also do not match transformers-loadable models — verify.
- bestGPTJ: 'true'
  description: Current best commercially licensable model based on GPT-J and trained
    by Nomic AI on the latest curated GPT4All dataset.
  filename: ggml-gpt4all-j-v1.3-groovy.bin
  filesize: '3785248281'
  isDefault: 'true'
  md5sum: 81a09a0ddf89690372fc296ff7f625af
- bestLlama: 'true'
  description: Current best non-commercially licensable model based on Llama 13b and
    trained by Nomic AI on the latest curated GPT4All dataset.
  filename: ggml-gpt4all-l13b-snoozy.bin
  filesize: '8136770688'
  md5sum: 91f886b68fbce697e9a3cd501951e455
- bestMPT: 'true'
  description: Current best non-commercially licensable chat model based on MPT and
    trained by Mosaic ML.
  filename: ggml-mpt-7b-chat.bin
  filesize: '4854401050'
  isDefault: 'true'
  md5sum: 756249d3d6abe23bde3b1ae272628640
  requires: 2.4.1
- description: A commercially licensable model based on GPT-J and trained by Nomic
    AI on the v2 GPT4All dataset.
  filename: ggml-gpt4all-j-v1.2-jazzy.bin
  filesize: '3785248281'
  md5sum: 879344aaa9d62fdccbda0be7a09e7976
- description: A commercially licensable model based on GPT-J and trained by Nomic
    AI on the v1 GPT4All dataset.
  filename: ggml-gpt4all-j-v1.1-breezy.bin
  filesize: '3785248281'
  md5sum: 61d48a82cb188cceb14ebb8082bfec37
- description: A commercially licensable model based on GPT-J and trained by Nomic
    AI on the v0 GPT4All dataset.
  filename: ggml-gpt4all-j.bin
  filesize: '3785248281'
  md5sum: 5b5a3f9b858d33b29b52b89692415595
- description: A non-commercially licensable model based on Llama 7b and trained by
    teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
  filename: ggml-vicuna-7b-1.1-q4_2.bin
  filesize: '4212859520'
  md5sum: 29119f8fa11712704c6b22ac5ab792ea
- description: A non-commercially licensable model based on Llama 13b and trained
    by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
  filename: ggml-vicuna-13b-1.1-q4_2.bin
  filesize: '8136770688'
  md5sum: 95999b7b0699e2070af63bf5d34101a8
- description: A non-commercially licensable model based on Llama 7b and trained by
    Microsoft and Peking University.
  filename: ggml-wizardLM-7B.q4_2.bin
  filesize: '4212864640'
  md5sum: 99e6d129745a3f1fb1121abed747b05a
- description: A non-commercially licensable model based on Llama 13b and RLHF trained
    by Stable AI.
  filename: ggml-stable-vicuna-13B.q4_2.bin
  filesize: '8136777088'
  md5sum: 6cb4ee297537c9133bddab9692879de0
- description: A commercially licensable model base pre-trained by Mosaic ML.
  filename: ggml-mpt-7b-base.bin
  filesize: '4854401028'
  md5sum: 120c32a51d020066288df045ef5d52b9
  requires: 2.4.1
- description: A non-commercially licensable model based on Vicuna 13b, fine-tuned
    on ~180,000 instructions, trained by Nous Research.
  filename: ggml-nous-gpt4-vicuna-13b.bin
  filesize: '8136777088'
  md5sum: d5eafd5b0bd0d615cfd5fd763f642dfe
- description: A commericially licensable instruct model based on MPT and trained
    by Mosaic ML.
  filename: ggml-mpt-7b-instruct.bin
  filesize: '4854401028'
  md5sum: 1cfa4958f489f0a0d1ffdf6b37322809
  requires: 2.4.1
|
@ -1,11 +1,13 @@
|
||||
- bestGPTJ: 'true'
|
||||
description: Current best commercially licensable model based on GPT-J and trained
|
||||
by Nomic AI on the latest curated GPT4All dataset.
|
||||
filename: ggml-gpt4all-j-v1.3-groovy.bin
|
||||
filesize: '3785248281'
|
||||
isDefault: 'true'
|
||||
md5sum: 81a09a0ddf89690372fc296ff7f625af
|
||||
server: https://gpt4all.io/models/
|
||||
- bestLlama: 'false'
|
||||
description: The model who started it all
|
||||
filename: gpt4all-lora-quantized-ggml.new.bin
|
||||
md5sum: 91f886b68fbce697e9a3cd501951e455
|
||||
server: https://huggingface.co/ParisNeo/GPT4All/resolve/main/
|
||||
- bestLlama: 'false'
|
||||
description: The model who started it all (uncensored version)
|
||||
filename: gpt4all-lora-unfiltered-quantized.new.bin
|
||||
md5sum: 91f886b68fbce697e9a3cd501951e455
|
||||
server: https://huggingface.co/ParisNeo/GPT4All/resolve/main/
|
||||
- bestLlama: 'true'
|
||||
description: Current best non-commercially licensable model based on Llama 13b and
|
||||
trained by Nomic AI on the latest curated GPT4All dataset.
|
||||
@ -13,33 +15,6 @@
|
||||
filesize: '8136770688'
|
||||
md5sum: 91f886b68fbce697e9a3cd501951e455
|
||||
server: https://gpt4all.io/models/
|
||||
- bestMPT: 'true'
|
||||
description: Current best non-commercially licensable chat model based on MPT and
|
||||
trained by Mosaic ML.
|
||||
filename: ggml-mpt-7b-chat.bin
|
||||
filesize: '4854401050'
|
||||
isDefault: 'true'
|
||||
md5sum: 756249d3d6abe23bde3b1ae272628640
|
||||
requires: 2.4.1
|
||||
server: https://gpt4all.io/models/
|
||||
- description: A commercially licensable model based on GPT-J and trained by Nomic
|
||||
AI on the v2 GPT4All dataset.
|
||||
filename: ggml-gpt4all-j-v1.2-jazzy.bin
|
||||
filesize: '3785248281'
|
||||
md5sum: 879344aaa9d62fdccbda0be7a09e7976
|
||||
server: https://gpt4all.io/models/
|
||||
- description: A commercially licensable model based on GPT-J and trained by Nomic
|
||||
AI on the v1 GPT4All dataset.
|
||||
filename: ggml-gpt4all-j-v1.1-breezy.bin
|
||||
filesize: '3785248281'
|
||||
md5sum: 61d48a82cb188cceb14ebb8082bfec37
|
||||
server: https://gpt4all.io/models/
|
||||
- description: A commercially licensable model based on GPT-J and trained by Nomic
|
||||
AI on the v0 GPT4All dataset.
|
||||
filename: ggml-gpt4all-j.bin
|
||||
filesize: '3785248281'
|
||||
md5sum: 5b5a3f9b858d33b29b52b89692415595
|
||||
server: https://gpt4all.io/models/
|
||||
- description: A non-commercially licensable model based on Llama 7b and trained by
|
||||
teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
|
||||
filename: ggml-vicuna-7b-1.1-q4_2.bin
|
||||
@ -64,22 +39,9 @@
|
||||
filesize: '8136777088'
|
||||
md5sum: 6cb4ee297537c9133bddab9692879de0
|
||||
server: https://gpt4all.io/models/
|
||||
- description: A commercially licensable model base pre-trained by Mosaic ML.
|
||||
filename: ggml-mpt-7b-base.bin
|
||||
filesize: '4854401028'
|
||||
md5sum: 120c32a51d020066288df045ef5d52b9
|
||||
requires: 2.4.1
|
||||
server: https://gpt4all.io/models/
|
||||
- description: A non-commercially licensable model based on Vicuna 13b, fine-tuned
|
||||
on ~180,000 instructions, trained by Nous Research.
|
||||
filename: ggml-nous-gpt4-vicuna-13b.bin
|
||||
filesize: '8136777088'
|
||||
md5sum: d5eafd5b0bd0d615cfd5fd763f642dfe
|
||||
server: https://gpt4all.io/models/
|
||||
- description: A commericially licensable instruct model based on MPT and trained
|
||||
by Mosaic ML.
|
||||
filename: ggml-mpt-7b-instruct.bin
|
||||
filesize: '4854401028'
|
||||
md5sum: 1cfa4958f489f0a0d1ffdf6b37322809
|
||||
requires: 2.4.1
|
||||
server: https://gpt4all.io/models/
|
||||
|
Loading…
Reference in New Issue
Block a user