Mirror of https://github.com/ParisNeo/lollms-webui.git

Commit 328e60f8ae (parent: 12663daca5)
Fixed some bugs, reorganized code
@@ -8,7 +8,7 @@
 # A simple api to communicate with gpt4all-ui and its models.
 ######
 from datetime import datetime
-from gpt4all_api.db import DiscussionsDB
+from api.db import DiscussionsDB
 from pathlib import Path
 import importlib
 from pyaipersonality import AIPersonality
@@ -249,13 +249,14 @@ class DiscussionsDB:
             discussions.append(discussion)
         return discussions


    def export_discussions_to_json(self, discussions_ids:list):
        # Convert the list of discussion IDs to a tuple
        discussions_ids_tuple = tuple(discussions_ids)
-       db_discussions = self.select("SELECT * FROM discussion WHERE discussion_id IN ({})".format(
-           ','.join(['?'] * len(discussions_ids_tuple))
-       ))
+       txt = ','.join(['?'] * len(discussions_ids_tuple))
+       db_discussions = self.select(
+           f"SELECT * FROM discussion WHERE id IN ({txt})",
+           discussions_ids_tuple
+       )
        discussions = []
        for row in db_discussions:
            discussion_id = row[0]
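The rewrite above binds the discussion IDs as query parameters instead of leaving the "?" placeholders unbound, and it also targets the id column rather than discussion_id. A minimal, self-contained sketch of the same placeholder-binding pattern with Python's built-in sqlite3 (the in-memory database and its columns are illustrative, not taken from the repository):

import sqlite3

def select_by_ids(con: sqlite3.Connection, ids: list) -> list:
    # Build one "?" placeholder per id; the values are passed separately
    # so sqlite3 handles quoting/escaping instead of string interpolation.
    placeholders = ','.join(['?'] * len(ids))
    cur = con.execute(f"SELECT * FROM discussion WHERE id IN ({placeholders})", tuple(ids))
    return cur.fetchall()

# Usage sketch
con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE discussion (id INTEGER PRIMARY KEY, title TEXT)")
con.executemany("INSERT INTO discussion VALUES (?, ?)", [(1, "a"), (2, "b"), (3, "c")])
print(select_by_ids(con, [1, 3]))  # [(1, 'a'), (3, 'c')]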
app.py (6 changed lines)
@@ -25,7 +25,7 @@ from tqdm import tqdm
 import subprocess
 import signal
 from pyaipersonality import AIPersonality
-from gpt4all_api.db import DiscussionsDB, Discussion
+from api.db import DiscussionsDB, Discussion
 from flask import (
     Flask,
     Response,
@@ -58,8 +58,8 @@ logging.getLogger('werkzeug').setLevel(logging.ERROR)
 logging.basicConfig(level=logging.WARNING)

 import time
-from gpt4all_api.config import load_config, save_config
-from gpt4all_api.api import GPT4AllAPI
+from api.config import load_config, save_config
+from api import GPT4AllAPI
 import shutil
 import markdown

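Throughout the commit, the gpt4all_api package is renamed to api; note that `from api import GPT4AllAPI` (rather than `from api.api import GPT4AllAPI`) only resolves if the class is defined in, or re-exported from, the package's __init__.py. The layout implied by the imports in this diff (a reconstruction from the import statements alone, not the actual repository tree):

api/
├── __init__.py   (defines or re-exports GPT4AllAPI)
├── db.py         (DiscussionsDB, Discussion)
├── config.py     (load_config, save_config)
└── backend.py    (GPTBackend)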
@@ -14,7 +14,7 @@
 ######
 from pathlib import Path
 from typing import Callable
-from gpt4all_api.backend import GPTBackend
+from api.backend import GPTBackend
 import yaml
 from ctransformers import AutoModelForCausalLM
@@ -15,7 +15,7 @@
 from pathlib import Path
 from typing import Callable
 from gpt4all import GPT4All
-from gpt4all_api.backend import GPTBackend
+from api.backend import GPTBackend
 import yaml

 __author__ = "parisneo"
@@ -15,7 +15,7 @@
 from pathlib import Path
 from typing import Callable
 from pygptj.model import Model
-from gpt4all_api.backend import GPTBackend
+from api.backend import GPTBackend

 __author__ = "parisneo"
 __github__ = "https://github.com/nomic-ai/gpt4all-ui"
@@ -15,7 +15,7 @@
 from pathlib import Path
 from typing import Callable
 from gpt4allj import Model
-from gpt4all_api.backend import GPTBackend
+from api.backend import GPTBackend
 import yaml

 __author__ = "parisneo"
@@ -11,7 +11,7 @@ from pathlib import Path
 from typing import Callable
 from transformers import AutoTokenizer, TextGenerationPipeline
 from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
-from gpt4all_api.backend import GPTBackend
+from api.backend import GPTBackend
 import torch
 import yaml
@@ -14,7 +14,7 @@
 from pathlib import Path
 from typing import Callable
 from llama_cpp import Llama
-from gpt4all_api.backend import GPTBackend
+from api.backend import GPTBackend
 import yaml
 import random
@@ -6,6 +6,17 @@
   owner: TheBloke
   server: https://huggingface.co/TheBloke/OpenAssistant-SFT-7-Llama-30B-GGML/resolve/main/
   sha256: 32fd44c685fbf429810db593e2db8aa42a7e1be2cd3571b6005d53b029acfcf5
+
+- bestLlama: 'true'
+  description: 'Manticore-13B'
+  filename: Manticore-13B.ggmlv3.q4_0.bin
+  license: Non commercial
+  owner_link: https://huggingface.co/TheBloke
+  owner: TheBloke
+  server: https://huggingface.co/TheBloke/Manticore-13B-GGML/resolve/main/
+  sha256: 910f3e73dc5797753313a950989c54a30342780311d64c3d4b8a37b12dd50336
+
+
 - bestLlama: 'true'
   description: Legacy version of Vicuna 7B v 1.1 Quantized on 4 bits
   filename: legacy-ggml-vicuna-7B-1.1-q4_0.bin
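Each model entry pairs a download server and filename with a sha256 checksum. A sketch of how a downloader could verify a fetched file against such an entry, using only Python's standard hashlib (the helper name and the local models/ path are hypothetical, not taken from the repository):

import hashlib
from pathlib import Path

def sha256_matches(path: Path, expected: str, chunk_size: int = 1 << 20) -> bool:
    # Hash in 1 MiB chunks so multi-gigabyte model files never
    # need to be loaded into memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest() == expected.lower()

# e.g. against the Manticore entry added above:
# sha256_matches(Path("models/Manticore-13B.ggmlv3.q4_0.bin"),
#                "910f3e73dc5797753313a950989c54a30342780311d64c3d4b8a37b12dd50336")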
@@ -14,7 +14,7 @@
 from pathlib import Path
 from typing import Callable
 from pyllamacpp.model import Model
-from gpt4all_api.backend import GPTBackend
+from api.backend import GPTBackend
 import yaml

 __author__ = "parisneo"
@@ -1 +0,0 @@