Mirror of https://github.com/ParisNeo/lollms-webui.git (synced 2024-12-18 20:17:50 +00:00)

Commit: d249855c00 — "Added few endpoints"
Parent: 2137170278

Changed file: app.py (77 changed lines)
@@ -106,8 +106,8 @@ class Gpt4AllWebUI(GPT4AllAPI):
            "/list_discussions", "list_discussions", self.list_discussions, methods=["GET"]
        )

        self.add_endpoint("/set_personality_language", "set_personality_language", self.set_personality_language, methods=["GET"])
        self.add_endpoint("/set_personality_category", "set_personality_category", self.set_personality_category, methods=["GET"])
        self.add_endpoint("/set_personality", "set_personality", self.set_personality, methods=["GET"])
        self.add_endpoint("/delete_personality", "delete_personality", self.delete_personality, methods=["GET"])


        self.add_endpoint("/", "", self.index, methods=["GET"])
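For orientation, here is a plausible sketch of what such an `add_endpoint` helper usually looks like. This is an assumption inferred from the call sites above and Flask's standard `add_url_rule` API; the project's actual implementation is not part of this hunk.

```
# Hypothetical sketch of an add_endpoint helper -- an assumption, not the project's code.
# The call sites above suggest it simply forwards to Flask's add_url_rule.
from flask import Flask

class ExampleServer:
    def __init__(self):
        self.app = Flask(__name__)

    def add_endpoint(self, endpoint=None, endpoint_name=None, handler=None, methods=None):
        # Route requests on `endpoint` (e.g. "/set_personality") to `handler` for the given methods.
        self.app.add_url_rule(endpoint, endpoint_name, handler, methods=methods or ["GET"])
```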
@@ -212,6 +212,11 @@ class Gpt4AllWebUI(GPT4AllAPI):
        self.add_endpoint(
            "/get_all_personalities", "get_all_personalities", self.get_all_personalities, methods=["GET"]
        )

        self.add_endpoint(
            "/get_personality", "get_personality", self.get_personality, methods=["GET"]
        )


        self.add_endpoint(
            "/reset", "reset", self.reset, methods=["GET"]
@@ -251,7 +256,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
            config_path = personality_folder / 'config.yaml'
            with open(config_path) as config_file:
                config_data = yaml.load(config_file, Loader=yaml.FullLoader)
                personality_info['name'] = personality_folder.name
                personality_info['name'] = config_data.get('name',"No Name")
                personality_info['description'] = config_data.get('personality_description',"")
                personality_info['author'] = config_data.get('creator', 'ParisNeo')
                personality_info['version'] = config_data.get('version', '1.0.0')
@@ -285,6 +290,47 @@ class Gpt4AllWebUI(GPT4AllAPI):

        return json.dumps(personalities)

    def get_personality(self):
        lang = request.args.get('language')
        category = request.args.get('category')
        name = request.args.get('name')
        personality_folder = Path("personalities")/f"{lang}"/f"{category}"/f"{name}"
        personality_path = personality_folder/f"config.yaml"
        personality_info = {}
        with open(personality_path) as config_file:
            config_data = yaml.load(config_file, Loader=yaml.FullLoader)
            personality_info['name'] = config_data.get('name',"unnamed")
            personality_info['description'] = config_data.get('personality_description',"")
            personality_info['author'] = config_data.get('creator', 'ParisNeo')
            personality_info['version'] = config_data.get('version', '1.0.0')
        scripts_path = personality_folder / 'scripts'
        personality_info['has_scripts'] = scripts_path.is_dir()
        assets_path = personality_folder / 'assets'
        gif_logo_path = assets_path / 'logo.gif'
        webp_logo_path = assets_path / 'logo.webp'
        png_logo_path = assets_path / 'logo.png'
        jpg_logo_path = assets_path / 'logo.jpg'
        jpeg_logo_path = assets_path / 'logo.jpeg'
        bmp_logo_path = assets_path / 'logo.bmp'

        personality_info['has_logo'] = png_logo_path.is_file() or gif_logo_path.is_file()

        if gif_logo_path.exists():
            personality_info['avatar'] = str(gif_logo_path).replace("\\","/")
        elif webp_logo_path.exists():
            personality_info['avatar'] = str(webp_logo_path).replace("\\","/")
        elif png_logo_path.exists():
            personality_info['avatar'] = str(png_logo_path).replace("\\","/")
        elif jpg_logo_path.exists():
            personality_info['avatar'] = str(jpg_logo_path).replace("\\","/")
        elif jpeg_logo_path.exists():
            personality_info['avatar'] = str(jpeg_logo_path).replace("\\","/")
        elif bmp_logo_path.exists():
            personality_info['avatar'] = str(bmp_logo_path).replace("\\","/")
        else:
            personality_info['avatar'] = ""
        return json.dumps(personality_info)

    # Settings (data: {"setting_name":<the setting name>,"setting_value":<the setting value>})
    def update_setting(self):
        data = request.get_json()
@@ -376,7 +422,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
            self.config["backend"]=data['setting_value']
            try:
                self.backend = self.process.load_backend(self.config["backend"], install=True)


            except Exception as ex:
                print("Couldn't build backend")
                return jsonify({'setting_name': data['setting_name'], "status":False, 'error':str(ex)})
@@ -453,15 +499,26 @@ class Gpt4AllWebUI(GPT4AllAPI):
        return jsonify(discussions)


    def set_personality_language(self):
    def set_personality(self):
        lang = request.args.get('language')
        self.config['personality_language'] = lang
        return jsonify({'success':True})

    def set_personality_category(self):
        category = request.args.get('category')
        name = request.args.get('name')
        self.config['personality_language'] = lang
        self.config['personality_category'] = category
        return jsonify({'success':True})
        self.config['personality'] = name
        result = self.process.set_config(self.config)
        return jsonify(result)

    def delete_personality(self):
        lang = request.args.get('language')
        category = request.args.get('category')
        name = request.args.get('name')
        path = Path("personalities")/lang/category/name
        try:
            shutil.rmtree(path)
            return jsonify({'status':'success'})
        except Exception as ex:
            return jsonify({'status':'failure','error':str(ex)})

    def add_endpoint(
        self,
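For context, a minimal client-side sketch of how the new GET endpoints could be exercised. The base URL is an assumption (not part of the commit); the query parameters mirror the `request.args` reads in the handlers above, and the example personality path (english/generic/gpt4all) is taken from the documentation excerpt further down.

```
# Hypothetical usage sketch -- the base URL is an assumption, not taken from the commit.
import requests

base_url = "http://localhost:9600"
params = {"language": "english", "category": "generic", "name": "gpt4all"}

# Fetch the metadata of a single personality.
info = requests.get(f"{base_url}/get_personality", params=params).json()
print(info.get("name"), info.get("avatar"))

# Select a personality, then delete another one (both are plain GET endpoints in this commit).
requests.get(f"{base_url}/set_personality", params=params)
requests.get(f"{base_url}/delete_personality",
             params={"language": "english", "category": "generic", "name": "gpt4all2"})
```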
@@ -238,124 +238,13 @@ This Flask server provides various endpoints to manage and interact with the chatbot
- "/main": GET request endpoint to start the chatbot.
- "/settings": GET request endpoint to display the settings page.
- "/help": GET request endpoint to display the help page.
- "/get_all_personalities": GET request endpoint to get the array of all personalities
- "/get_personality": GET request endpoint to get specific personality data based on the request

## TODO Endpoints:

Here we list the endpoints needed on the backend to make the UI work as expected.

- "/get_all_personalities": GET request endpoint to get the array of all personalities

> This one should either return the names of the installed personalities or return the same data as `"/get_current_personality"`; a minimal sketch of the names-only variant follows the examples below.

```
[
    {
        "personality": {
            "ai_message_prefix": "###gpt4all:\n",
            "anti_prompts": [
                "###user",
                "### user",
                "###gpt4all",
                "### gpt4all"
            ],
            "assets_list": [
                "personalities\\english\\generic\\gpt4all\\assets\\logo.png"
            ],
            "author": "ParisNeo",
            "category": "General",
            "dependencies": [],
            "disclaimer": "",
            "language": "en_XX",
            "link_text": "\n",
            "model_n_predicts": 1024,
            "model_repeat_last_n": 40,
            "model_repeat_penalty": 1.0,
            "model_temperature": 0.6,
            "model_top_k": 50,
            "model_top_p": 0.9,
            "name": "gpt4all",
            "personality_conditioning": "## Information:\nAssistant's name is gpt4all\nToday's date is {{date}}\n## Instructions:\nYour mission is to assist user to perform various tasks and answer his questions\n",
            "personality_description": "This personality is a helpful and Kind AI ready to help you solve your problems \n",
            "user_message_prefix": "###user:\n",
            "user_name": "user",
            "version": "1.0.0",
            "welcome_message": "Welcome! My name is gpt4all.\nHow can I help you today?\n"
        }
    },
    {
        "personality": {
            "ai_message_prefix": "###gpt4all:\n",
            "anti_prompts": [
                "###user",
                "### user",
                "###gpt4all",
                "### gpt4all"
            ],
            "assets_list": [
                "personalities\\english\\generic\\gpt4all\\assets\\logo.png"
            ],
            "author": "ParisNeo",
            "category": "General",
            "dependencies": [],
            "disclaimer": "",
            "language": "en_XX",
            "link_text": "\n",
            "model_n_predicts": 1024,
            "model_repeat_last_n": 40,
            "model_repeat_penalty": 1.0,
            "model_temperature": 0.6,
            "model_top_k": 50,
            "model_top_p": 0.9,
            "name": "gpt4all2",
            "personality_conditioning": "## Information:\nAssistant's name is gpt4all\nToday's date is {{date}}\n## Instructions:\nYour mission is to assist user to perform various tasks and answer his questions\n",
            "personality_description": "This personality is a helpful and Kind AI ready to help you solve your problems \n",
            "user_message_prefix": "###user:\n",
            "user_name": "user",
            "version": "1.0.0",
            "welcome_message": "Welcome! My name is gpt4all.\nHow can I help you today?\n"
        }
    },
    ...
]
```

OR

```
[
    "gpt4all",
    "gpt4all2",
    ...
]
```
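A minimal sketch of the names-only variant mentioned above, assuming the personalities/<language>/<category>/<name> folder layout used in app.py; the function name is hypothetical.

```
# Hypothetical sketch of the "names only" variant (not part of the commit).
# It assumes the personalities/<language>/<category>/<name> folder layout used in app.py.
import json
from pathlib import Path

def list_personality_names(language: str, category: str) -> str:
    folder = Path("personalities") / language / category
    names = [p.name for p in folder.iterdir() if p.is_dir()] if folder.is_dir() else []
    return json.dumps(names)
```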
- "/get_personality": GET request endpoint to get speciffic data based on request
|
||||
|
||||
Idea here is to get requested data on demand.. sometimes you want to get only assets, and match them by name from message object in discussions.
|
||||
|
||||
> Request body:
|
||||
|
||||
```
|
||||
{
|
||||
"name",
|
||||
"assets_list",
|
||||
"author",
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
> Response:
|
||||
|
||||
[
|
||||
{
|
||||
"name":"gpt4all"
|
||||
"assets_list":['img1','sfx2']
|
||||
"author":"ParisNeo"
|
||||
},
|
||||
{
|
||||
"name":"gpt4all2"
|
||||
"assets_list":['sfx1']
|
||||
"author":"Lemon"
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
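A minimal sketch of the fields-on-demand idea described above; the helper name and the `full_records` argument are hypothetical and not part of the commit.

```
# Hypothetical helper illustrating the "only return the requested fields" idea.
# full_records stands for the full personality dictionaries the server already has loaded.
def filter_personality_fields(full_records, requested_fields):
    return [
        {field: record[field] for field in requested_fields if field in record}
        for record in full_records
    ]

# Example: keep only name and author for each personality.
records = [{"name": "gpt4all", "author": "ParisNeo", "version": "1.0.0"}]
print(filter_personality_fields(records, ["name", "author"]))
```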
# Socketio endpoints

These are the WebSocket endpoints used for real-time communication between the client and the server via the SocketIO library (a minimal illustrative sketch follows).
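As a small illustration of what a SocketIO endpoint looks like, here is a hedged Flask-SocketIO sketch; the event names and payload shape are hypothetical and not taken from this project.

```
# Hypothetical Flask-SocketIO sketch -- event names and payload shape are assumptions,
# shown only to illustrate what a SocketIO endpoint is.
from flask import Flask
from flask_socketio import SocketIO, emit

app = Flask(__name__)
socketio = SocketIO(app)

@socketio.on("example_event")
def handle_example_event(data):
    # Echo the received payload back to the calling client in real time.
    emit("example_response", {"received": data})

if __name__ == "__main__":
    socketio.run(app)
```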
@@ -210,7 +210,7 @@ class ModelProcess:

    def rebuild_personality(self):
        try:
            print(" ******************* Building Personality from main Process *************************")
            print(f" ******************* Building Personality {self.config['personality']} from main Process *************************")
            personality_path = f"personalities/{self.config['personality_language']}/{self.config['personality_category']}/{self.config['personality']}"
            personality = AIPersonality(personality_path, run_scripts=False)
            print(f" ************ Personality {personality.name} is ready (Main process) ***************************")

@@ -224,7 +224,7 @@ class ModelProcess:

    def _rebuild_personality(self):
        try:
            print(" ******************* Building Personality from generation Process *************************")
            print(f" ******************* Building Personality {self.config['personality']} from generation Process *************************")
            personality_path = f"personalities/{self.config['personality_language']}/{self.config['personality_category']}/{self.config['personality']}"
            self.personality = AIPersonality(personality_path)
            print(f" ************ Personality {self.personality.name} is ready (generation process) ***************************")

@@ -239,8 +239,8 @@ class ModelProcess:
            self._set_config_result['errors'].append(f"couldn't load personality:{ex}")

    def step_callback(self, text, message_type):
        if message_type==0:
            self.generation_queue.put((text,self.id, message_type))
            self.generation_queue.put((text,self.id, message_type))


    def _run(self):
        self._rebuild_model()

@@ -672,7 +672,7 @@ class GPT4AllAPI():

        return string

    def process_chunk(self, chunk):
    def process_chunk(self, chunk, message_type):
        print(chunk,end="", flush=True)
        self.bot_says += chunk
        if not self.personality.detect_antiprompt(self.bot_says):

@@ -680,7 +680,8 @@ class GPT4AllAPI():
                    'data': self.bot_says,
                    'user_message_id':self.current_user_message_id,
                    'ai_message_id':self.current_ai_message_id,
                    'discussion_id':self.current_discussion.discussion_id
                    'discussion_id':self.current_discussion.discussion_id,
                    'message_type': message_type
                }
            )
        if self.cancel_gen:

@@ -723,7 +724,7 @@ class GPT4AllAPI():
        while(self.process.is_generating.value): # Simulating other commands being issued
            chunk, tok, message_type = self.process.generation_queue.get()
            if chunk!="":
                self.process_chunk(chunk)
                self.process_chunk(chunk, message_type)

        print()
        print("## Done ##")
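The last few hunks thread `message_type` from the generation queue into `process_chunk` and the emitted message payload. Below is a minimal sketch of that queue hand-off; the names follow the diff, but the surrounding process and socket plumbing is left out, so this is an illustrative assumption rather than the project's code.

```
# Minimal sketch of the (chunk, id, message_type) queue hand-off used above.
# Not the project's code: the multiprocessing/socket plumbing is omitted.
from multiprocessing import Queue

generation_queue = Queue()

def step_callback(text, message_id, message_type):
    # Producer side: only plain text chunks (message_type == 0) are forwarded.
    if message_type == 0:
        generation_queue.put((text, message_id, message_type))

def drain(process_chunk):
    # Consumer side: pop chunks and pass both the text and the message_type along,
    # mirroring the while-loop shown in GPT4AllAPI.
    while not generation_queue.empty():
        chunk, _msg_id, message_type = generation_queue.get()
        if chunk != "":
            process_chunk(chunk, message_type)
```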