Mirror of https://github.com/ParisNeo/lollms-webui.git (synced 2024-12-18 20:17:50 +00:00)
Added and compiled with playground
This commit is contained in:
parent c10621107a
commit a61b31d386
api/__init__.py  158
@@ -507,6 +507,164 @@ class LoLLMsAPPI(LollmsApplication):
                        }, room=client_id
                    )

        # A copy of the original lollms-server generation code needed for playground
        @self.socketio.on('generate_text')
        def handle_generate_text(data):
            client_id = request.sid
            ASCIIColors.info(f"Text generation requested by client: {client_id}")
            if self.buzy:
                emit("buzzy", {"message":"I am buzzy. Come back later."}, room=client_id)
                self.socketio.sleep(0)
                ASCIIColors.warning(f"OOps request {client_id} refused!! Server buzy")
                return
            def generate_text():
                self.buzy = True
                try:
                    model = self.model
                    self.connections[client_id]["is_generating"]=True
                    self.connections[client_id]["requested_stop"]=False
                    prompt = data['prompt']
                    personality_id = data['personality']
                    n_predicts = data["n_predicts"]
                    parameters = data.get("parameters",{
                        "temperature":self.config["temperature"],
                        "top_k":self.config["top_k"],
                        "top_p":self.config["top_p"],
                        "repeat_penalty":self.config["repeat_penalty"],
                        "repeat_last_n":self.config["repeat_last_n"],
                        "seed":self.config["seed"]
                    })

                    if personality_id==-1:
                        # Raw text generation
                        self.answer = {"full_text":""}
                        def callback(text, message_type: MSG_TYPE, metadata:dict={}):
                            if message_type == MSG_TYPE.MSG_TYPE_CHUNK:
                                ASCIIColors.success(f"generated:{len(self.answer['full_text'].split())} words", end='\r')
                                self.answer["full_text"] = self.answer["full_text"] + text
                                self.socketio.emit('text_chunk', {'chunk': text, 'type':MSG_TYPE.MSG_TYPE_CHUNK.value}, room=client_id)
                                self.socketio.sleep(0)
                            if client_id in self.connections:# Client disconnected
                                if self.connections[client_id]["requested_stop"]:
                                    return False
                                else:
                                    return True
                            else:
                                return False

                        tk = model.tokenize(prompt)
                        n_tokens = len(tk)
                        fd = model.detokenize(tk[-min(self.config.ctx_size-n_predicts,n_tokens):])

                        try:
                            ASCIIColors.print("warming up", ASCIIColors.color_bright_cyan)
                            generated_text = model.generate(fd,
                                                            n_predict=n_predicts,
                                                            callback=callback,
                                                            temperature = parameters["temperature"],
                                                            top_k = parameters["top_k"],
                                                            top_p = parameters["top_p"],
                                                            repeat_penalty = parameters["repeat_penalty"],
                                                            repeat_last_n = parameters["repeat_last_n"],
                                                            seed = parameters["seed"]
                                                            )
                            ASCIIColors.success(f"\ndone")
                            if client_id in self.connections:
                                if not self.connections[client_id]["requested_stop"]:
                                    # Emit the generated text to the client
                                    self.socketio.emit('text_generated', {'text': generated_text}, room=client_id)
                                    self.socketio.sleep(0)
                        except Exception as ex:
                            self.socketio.emit('generation_error', {'error': str(ex)}, room=client_id)
                            ASCIIColors.error(f"\ndone")
                        self.buzy = False
                    else:
                        try:
                            personality: AIPersonality = self.personalities[personality_id]
                            ump = self.config.discussion_prompt_separator +self.config.user_name+": " if self.config.use_user_name_in_discussions else self.personality.user_message_prefix
                            personality.model = model
                            cond_tk = personality.model.tokenize(personality.personality_conditioning)
                            n_cond_tk = len(cond_tk)
                            # Placeholder code for text generation
                            # Replace this with your actual text generation logic
                            print(f"Text generation requested by client: {client_id}")

                            self.answer["full_text"] = ''
                            full_discussion_blocks = self.connections[client_id]["full_discussion_blocks"]

                            if prompt != '':
                                if personality.processor is not None and personality.processor_cfg["process_model_input"]:
                                    preprocessed_prompt = personality.processor.process_model_input(prompt)
                                else:
                                    preprocessed_prompt = prompt

                                if personality.processor is not None and personality.processor_cfg["custom_workflow"]:
                                    full_discussion_blocks.append(ump)
                                    full_discussion_blocks.append(preprocessed_prompt)

                                else:

                                    full_discussion_blocks.append(ump)
                                    full_discussion_blocks.append(preprocessed_prompt)
                                    full_discussion_blocks.append(personality.link_text)
                                    full_discussion_blocks.append(personality.ai_message_prefix)

                            full_discussion = personality.personality_conditioning + ''.join(full_discussion_blocks)

                            def callback(text, message_type: MSG_TYPE, metadata:dict={}):
                                if message_type == MSG_TYPE.MSG_TYPE_CHUNK:
                                    self.answer["full_text"] = self.answer["full_text"] + text
                                    self.socketio.emit('text_chunk', {'chunk': text}, room=client_id)
                                    self.socketio.sleep(0)
                                try:
                                    if self.connections[client_id]["requested_stop"]:
                                        return False
                                    else:
                                        return True
                                except: # If the client is disconnected then we stop talking to it
                                    return False

                            tk = personality.model.tokenize(full_discussion)
                            n_tokens = len(tk)
                            fd = personality.model.detokenize(tk[-min(self.config.ctx_size-n_cond_tk-personality.model_n_predicts,n_tokens):])

                            if personality.processor is not None and personality.processor_cfg["custom_workflow"]:
                                ASCIIColors.info("processing...")
                                generated_text = personality.processor.run_workflow(prompt, previous_discussion_text=personality.personality_conditioning+fd, callback=callback)
                            else:
                                ASCIIColors.info("generating...")
                                generated_text = personality.model.generate(
                                    personality.personality_conditioning+fd,
                                    n_predict=personality.model_n_predicts,
                                    callback=callback)

                            if personality.processor is not None and personality.processor_cfg["process_model_output"]:
                                generated_text = personality.processor.process_model_output(generated_text)

                            full_discussion_blocks.append(generated_text.strip())
                            ASCIIColors.success("\ndone")

                            # Emit the generated text to the client
                            self.socketio.emit('text_generated', {'text': generated_text}, room=client_id)
                            self.socketio.sleep(0)
                        except Exception as ex:
                            self.socketio.emit('generation_error', {'error': str(ex)}, room=client_id)
                            ASCIIColors.error(f"\ndone")
                        self.buzy = False
                except Exception as ex:
                    trace_exception(ex)
                    self.socketio.emit('generation_error', {'error': str(ex)}, room=client_id)
                    self.buzy = False

            # Start the text generation task in a separate thread
            task = self.socketio.start_background_task(target=generate_text)


        @socketio.on('generate_msg')
        def generate_msg(data):
            client_id = request.sid
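For reference, the handler added above can be exercised from any Socket.IO client, not only the playground page. The following is a minimal sketch, assuming the server is reachable at http://localhost:9600 (the URL is an assumption, adjust to your configuration) and the python-socketio client package is installed. Event names and payload keys are taken from the diff itself.

    # Hypothetical playground-style client for the 'generate_text' socket event.
    import socketio

    sio = socketio.Client()

    @sio.on('text_chunk')
    def on_chunk(data):
        # Streamed chunks arrive as they are generated
        print(data['chunk'], end='', flush=True)

    @sio.on('text_generated')
    def on_done(data):
        print("\n--- full text ---\n", data['text'])
        sio.disconnect()

    @sio.on('buzzy')
    def on_busy(data):
        # The server refuses new requests while a generation is running
        print(data['message'])
        sio.disconnect()

    @sio.on('generation_error')
    def on_error(data):
        print("error:", data['error'])
        sio.disconnect()

    sio.connect('http://localhost:9600')   # assumed default address
    sio.emit('generate_text', {
        'prompt': 'Once upon a time',
        'personality': -1,                 # -1 selects raw text generation
        'n_predicts': 256,
        'parameters': {
            'temperature': 0.9, 'top_k': 50, 'top_p': 0.9,
            'repeat_penalty': 1.3, 'repeat_last_n': 50, 'seed': -1
        }
    })
    sio.wait()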
app.py  34
@@ -409,7 +409,41 @@ class LoLLMsWebUI(LoLLMsAPPI):
            "/import_multiple_discussions", "import_multiple_discussions", self.import_multiple_discussions, methods=["POST"]
        )

        self.add_endpoint(
            "/presets.json", "presets.json", self.get_presets, methods=["GET"]
        )

        self.add_endpoint(
            "/save_presets", "save_presets", self.save_presets, methods=["POST"]
        )

    def get_presets(self):
        presets_file = self.lollms_paths.personal_databases_path/"presets.json"
        if not presets_file.exists():
            shutil.copy("presets/presets.json",presets_file)
        with open(presets_file) as f:
            data = json.loads(f.read())
        return jsonify(data)

    def save_presets(self):
        """Saves a preset to a file.

        Args:
            None.

        Returns:
            None.
        """

        # Get the JSON data from the POST request.
        preset_data = request.get_json()

        presets_file = self.lollms_paths.personal_databases_path/"presets.json"
        # Save the JSON data to a file.
        with open(presets_file, "w") as f:
            json.dump(preset_data, f, indent=4)

        return "Preset saved successfully!"

    def export_multiple_discussions(self):
        data = request.get_json()
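The two new endpoints form a simple round trip: GET /presets.json returns the presets dictionary (seeding the user copy from presets/presets.json on first use), and POST /save_presets writes the posted dictionary back. A small sketch with the requests library, assuming the web UI is reachable at http://localhost:9600 (an assumption, adjust to your configuration):

    # Hypothetical round trip against the new presets endpoints.
    import requests

    base = "http://localhost:9600"   # assumed default address

    # Fetch the presets dictionary
    presets = requests.get(f"{base}/presets.json").json()
    print(list(presets.keys()))

    # Add a preset locally and push the whole dictionary back
    presets["My preset"] = "Summarize the following text:\n<paste text here>\nSummary:"
    r = requests.post(f"{base}/save_presets", json=presets)
    print(r.text)   # "Preset saved successfully!"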
presets/presets.json  7  (new file)
@@ -0,0 +1,7 @@
{
    "Writing a Book in Latex": "<Add some context information to give the AI some context about the book or leave blank if you have no specific idea>\n```latex\n\\documentclass[12pt]{book}\n\\usepackage{url}\n\\begin{document}\n\\title{<Put the title of the book here>}\n\\author{<Put the author name here>} % Author\n\\date{\\today} % Date\n\\maketitle\n\\tableofcontents\n\\chapter{Introduction}\n<Add any required text then press generate to push the AI to build your book>\n",
    "Simple Book writing":"Once apon a time",
    "Simple Question Answer":"User:<Put your question here>\nAssistant:",
    "Question Answer with conditionning":"Assistant is a highly developed AI capable of answering any question about any subject.\nUser:<Put your question here>\nAssistant:",
    "Instruct mode": "Instructions:\n<Put your instructions here>\nAnswer:"
}
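This file only seeds the defaults: get_presets() copies it to personal_databases_path/presets.json the first time the endpoint is hit, and the UI edits that copy afterwards. A sketch of extending the shipped defaults before first launch, assuming it is run from the repository root:

    # Hypothetical helper: add a default preset to presets/presets.json before first launch.
    import json
    from pathlib import Path

    presets_path = Path("presets/presets.json")
    presets = json.loads(presets_path.read_text())
    presets["Translation"] = "Translate the following text to French:\n<Put your text here>\nTranslation:"
    presets_path.write_text(json.dumps(presets, indent=4))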
@@ -186,7 +186,7 @@
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]
   Copyright 2023 Saifeddine ALOUI (aka parisneo)

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
web/dist/assets/index-3e7ad0ad.css  8  (vendored)
File diff suppressed because one or more lines are too long

web/dist/assets/index-edffcce0.css  8  (vendored, new file)
File diff suppressed because one or more lines are too long
web/dist/index.html  4  (vendored)
@@ -6,8 +6,8 @@

    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>LoLLMS WebUI - Welcome</title>
    <script type="module" crossorigin src="/assets/index-7801f2bd.js"></script>
    <link rel="stylesheet" href="/assets/index-3e7ad0ad.css">
    <script type="module" crossorigin src="/assets/index-76118a1a.js"></script>
    <link rel="stylesheet" href="/assets/index-edffcce0.css">
  </head>
  <body>
    <div id="app"></div>
@@ -7,6 +7,11 @@
          Discussions
        </RouterLink>
      </li>
      <li>
        <RouterLink :to="{ name: 'playground' }" class="link-item dark:link-item-dark">
          Playground
        </RouterLink>
      </li>
      <li>
        <RouterLink :to="{ name: 'settings' }" class="link-item dark:link-item-dark">
          Settings
@@ -1,4 +1,5 @@
import { createRouter, createWebHistory } from 'vue-router'
import PlayGroundView from '../views/PlayGroundView.vue'
import ExtensionsView from '../views/ExtensionsView.vue'
import HelpView from '../views/HelpView.vue'
import SettingsView from '../views/SettingsView.vue'
@@ -10,6 +11,11 @@ import DiscussionsView from '../views/DiscussionsView.vue'
const router = createRouter({
  history: createWebHistory(import.meta.env.BASE_URL),
  routes: [
    {
      path: '/playground/',
      name: 'playground',
      component: PlayGroundView
    },
    {
      path: '/extensions/',
      name: 'extensions',
@@ -1,29 +1,32 @@
<template>
  <div class="container mx-auto p-4 bg-gray-100 shadow-lg">
    <div class="flex justify-between">
      <div class="m-0">
        <button id="generate-button" @click="generate" class="bg-blue-500 hover:bg-blue-600 active:bg-blue-700 text-white font-bold py-2 px-4 rounded ml-2">Generate Text</button>
        <button v-if="!generating" id="stop-button" @click="stopGeneration" class="bg-red-500 hover:bg-red-600 active:bg-red-700 text-white font-bold py-2 px-4 rounded hidden ml-2 ">Stop Generation</button>
        <button v-else id="export-button" @click="exportText" class="bg-green-500 hover:bg-green-600 active:bg-green-700 text-white font-bold py-2 px-4 rounded ml-2">Export Text</button>
      </div>
    </div>
    <div class="flex flex-row h-full">
      <div class="flex-grow ml-2">
        <div class="mt-4 d-flex justify-content-space-between flex-row">
          <label class="mt-2">Presets</label>
          <select v-model="selectedPreset" class="w-25 m-2">
          <select v-model="selectedPreset" class="w-25 m-2 border-2 rounded-md shadow-sm">
            <option v-for="preset in Object.keys(presets)" :key="preset" :value="preset">
              {{ preset }}
            </option>
          </select>
          <button class="bg-red-500 hover:bg-red-600 active:bg-red-700 text-white font-bold py-2 px-4 rounded ml-2 " @click="setPreset">Use preset</button>
          <button class="bg-green-500 hover:bg-green-600 active:bg-red-700 text-white font-bold py-2 px-4 rounded ml-2 " @click="addPreset">Add preset</button>
          <button class="bg-red-500 hover:bg-red-600 active:bg-red-700 text-white font-bold py-2 px-4 rounded ml-2" @click="removePreset">Remove preset</button>
          <button class="bg-red-500 hover:bg-red-600 active:bg-red-700 text-white font-bold py-2 px-4 rounded ml-2" @click="removePreset">Save preset</button>
          <button class="bg-green-500 hover:bg-green-600 active:bg-red-700 text-white font-bold py-2 px-4 rounded ml-2 " @click="setPreset" title="Use preset"><i data-feather="check"></i></button>
          <button class="bg-green-500 hover:bg-green-600 active:bg-red-700 text-white font-bold py-2 px-4 rounded ml-2 " @click="addPreset" title="Add this text as a preset"><i data-feather="plus"></i></button>
          <button class="bg-red-500 hover:bg-red-600 active:bg-red-700 text-white font-bold py-2 px-4 rounded ml-2" @click="removePreset" title="Remove preset"><i data-feather="x"></i></button>
          <button class="bg-green-500 hover:bg-green-600 active:bg-red-700 text-white font-bold py-2 px-4 rounded ml-2" @click="savePreset" title="Save presets list"><i data-feather="save"></i></button>
          <button class="bg-green-500 hover:bg-green-600 active:bg-red-700 text-white font-bold py-2 px-4 rounded ml-2" @click="reloadPresets" title="Reload presets list"><i data-feather="refresh-ccw"></i></button>

        </div>
        <div class="flex-grow">
          <textarea v-model="text" id="text_element" class="mt-4 p-2 border border-gray-300 rounded-md h-64 overflow-y-scroll w-full" type="text"></textarea>
        </div>
        <div class="flex justify-between">
          <div class="m-0">
            <button v-show="!generating" id="generate-button" @click="generate" class="bg-blue-500 hover:bg-blue-600 active:bg-blue-700 text-white font-bold py-2 px-4 rounded ml-2">Generate Text</button>
            <button v-show="generating" id="stop-button" @click="stopGeneration" class="bg-red-500 hover:bg-red-600 active:bg-red-700 text-white font-bold py-2 px-4 rounded ml-2 ">Stop Generation</button>
            <button v-show="!generating" id="export-button" @click="exportText" class="bg-green-500 hover:bg-green-600 active:bg-green-700 text-white font-bold py-2 px-4 rounded ml-2">Export Text</button>
          </div>
        </div>

      </div>
      <div id="settings" class="border border-blue-300 bg-blue-200 mt-4 w-25 mr-2 h-full mb-10" style="align-items: center; height: fit-content; margin: 10px; box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); border-radius: 4px;">
        <div id="title" class="border border-blue-600 bg-blue-300 m-0" style="align-items: center; height: fit-content; margin: 10px; box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); border-radius: 4px;">
@@ -54,6 +57,12 @@
            <input type="range" v-model="repeat_last_n" min="0" max="100" step="1" class="w-full">
            <span class="slider-value text-gray-500">Current value: {{ repeat_last_n }}</span>
          </div>

          <div class="slider-container ml-2 mr-2">
            <h3 class="text-gray-600">Number of tokens to generate</h3>
            <input type="number" v-model="n_predicts" class="w-full">
            <span class="slider-value text-gray-500">Current value: {{ n_predicts }}</span>
          </div>
          <div class="slider-container ml-2 mr-2">
            <h3 class="text-gray-600">Seed</h3>
            <input type="number" v-model="seed" class="w-full">
@@ -68,6 +77,8 @@
</template>

<script>

import feather from 'feather-icons'
import axios from "axios";
import socket from '@/services/websocket.js'
import Toast from '../components/Toast.vue'
@@ -84,16 +95,25 @@ export default {
      top_p: 0.9,
      repeat_penalty: 1.3,
      repeat_last_n: 50,
      n_predicts: 2000,
      seed: -1,
    };
  },
  components:{
    Toast,
  },
  mounted() {
    //console.log('chatbox mnt',this.$refs)
    this.$nextTick(() => {
      feather.replace();
    });
  },
  created(){
    axios.get('./presets.json').then(response => {
      console.log(response.data)
      this.presets=response.data
    }).catch(ex=>{
      this.$refs.toast.showToast(`Error: ${ex}`,4,false)
    });
    // Event handler for receiving generated text chunks
    socket.on('text_chunk', data => {
@@ -149,7 +169,7 @@ export default {
      var prompt = this.text
      console.log(prompt)
      // Trigger the 'generate_text' event with the prompt
      socket.emit('generate_text', { prompt: prompt, personality: -1, n_predicts: 1024 ,
      socket.emit('generate_text', { prompt: prompt, personality: -1, n_predicts: this.n_predicts ,
        parameters: {
          temperature: this.temperature,
          top_k: this.top_k,
@@ -167,7 +187,14 @@ export default {
      socket.emit('cancel_generation',{});
    },
    exportText(){

      const textToExport = this.text;
      const element = document.createElement('a');
      const file = new Blob([textToExport], {type: 'text/plain'});
      element.href = URL.createObjectURL(file);
      element.download = 'exported_text.txt';
      document.body.appendChild(element);
      element.click();
      document.body.removeChild(element);
    },
    setPreset() {
      this.text = this.presets[this.selectedPreset];
@@ -182,11 +209,19 @@ export default {
      }
    },
    savePreset() {
      axios.post("/save_preset", presets).then((response) => {
      axios.post("/save_presets", this.presets).then((response) => {
        console.log(response);
        this.$refs.toast.showToast(`Preset saved`,4,true)
        this.$refs.toast.showToast(`Presets saved`,4,true)
      });
    }
    },
    reloadPresets() {
      axios.get('./presets.json').then(response => {
        console.log(response.data)
        this.presets=response.data
      }).catch(ex=>{
        this.$refs.toast.showToast(`Error: ${ex}`,4,false)
      });
    },
  }
};
</script>