commit d6602ea91a (parent 2f7b942a64)
Author: Saifeddine ALOUI
Date: 2023-09-06 00:57:23 +02:00

10 changed files with 112 additions and 73 deletions

View File

@@ -1159,7 +1159,7 @@ class LoLLMsAPPI(LollmsApplication):
}, room=client_id
)
self.socketio.sleep(0.01)
self.connections[client_id]["current_discussion"].update_message(self.connections[client_id]["generated_text"], new_metadata=mtdt)
self.connections[client_id]["current_discussion"].update_message(self.connections[client_id]["generated_text"], new_metadata=mtdt, new_ui=ui)
def close_message(self, client_id):
# Send final message
@@ -1210,7 +1210,7 @@ class LoLLMsAPPI(LollmsApplication):
self.notify(chunk,True, client_id)
ASCIIColors.info("--> Info:"+chunk)
if message_type == MSG_TYPE.MSG_TYPE_UI:
self.update_message(client_id, '', parameters, metadata, chunk, MSG_TYPE.MSG_TYPE_FULL)
self.update_message(client_id, "", parameters, metadata, chunk, MSG_TYPE.MSG_TYPE_UI)
if message_type == MSG_TYPE.MSG_TYPE_NEW_MESSAGE:
self.nb_received_tokens = 0
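
The hunks above route MSG_TYPE_UI chunks through update_message and persist them on the message via new_ui, so the web client can render them with DynamicUIRenderer. Below is a minimal sketch of emitting such a chunk from generation code, assuming the (chunk, message_type) callback convention used elsewhere in LoLLMs; the import path, helper name, and HTML payload are illustrative only.

```python
# Illustrative sketch: emit an HTML chunk that the code above routes through
# update_message(..., MSG_TYPE.MSG_TYPE_UI) and stores on the message's "ui" field.
# The import path and callback convention are assumptions, not part of this commit.
from lollms.types import MSG_TYPE

def emit_ui_panel(callback):
    html = "<div class='p-2'>Training progress: <b>0%</b></div>"
    callback(html, MSG_TYPE.MSG_TYPE_UI)
```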

app.py
View File

@@ -14,7 +14,7 @@ __github__ = "https://github.com/ParisNeo/lollms-webui"
__copyright__ = "Copyright 2023, "
__license__ = "Apache 2.0"
__version__ ="6.1"
__version__ ="6.2"
main_repo = "https://github.com/ParisNeo/lollms-webui.git"
import os
@@ -208,6 +208,8 @@ class LoLLMsWebUI(LoLLMsAPPI):
# =========================================================================================
# Endpoints
# =========================================================================================
self.add_endpoint("/start_training", "start_training", self.start_training, methods=["POST"])
self.add_endpoint("/get_lollms_version", "get_lollms_version", self.get_lollms_version, methods=["GET"])
self.add_endpoint("/get_lollms_webui_version", "get_lollms_webui_version", self.get_lollms_webui_version, methods=["GET"])
@@ -1370,6 +1372,21 @@ class LoLLMsWebUI(LoLLMsAPPI):
ASCIIColors.info("")
ASCIIColors.info("")
run_update_script(self.args)
def start_training(self):
data = request.get_json()
ASCIIColors.info(f"--- Training of model {data['model_name']} requested ---")
ASCIIColors.info(f"Cleaning memory:")
fn = self.binding.binding_folder_name
del self.binding
self.binding = None
self.model = None
for per in self.mounted_personalities:
per.model = None
gc.collect()
ASCIIColors.info(f"issuing command : python gptqlora.py --model_path {self.lollms_paths.personal_models_path/fn/data['model_name']}")
subprocess.run(["python", "gptqlora.py", "--model_path", self.lollms_paths.personal_models_path/fn/data["model_name"]],cwd=self.lollms_paths.gptqlora_path)
pass
def get_lollms_version(self):
version = pkg_resources.get_distribution('lollms').version
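
The new /start_training route reads only model_name from the posted JSON, unloads the current binding and model to free memory, then launches gptqlora.py from lollms_paths.gptqlora_path. A minimal client sketch follows; the host, port, and any payload field other than model_name are assumptions.

```python
# Minimal sketch of a client call to the new /start_training endpoint.
# Base URL and port are assumptions; the handler above only uses "model_name".
import requests

payload = {"model_name": "my-model-GPTQ"}  # hypothetical model folder name
resp = requests.post("http://localhost:9600/start_training", json=payload)
print(resp.status_code)
```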

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

web/dist/index.html vendored
View File

@@ -6,8 +6,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LoLLMS WebUI - Welcome</title>
<script type="module" crossorigin src="/assets/index-c1e6772c.js"></script>
<link rel="stylesheet" href="/assets/index-a4ed7438.css">
<script type="module" crossorigin src="/assets/index-dfbea9b8.js"></script>
<link rel="stylesheet" href="/assets/index-ae0c2e02.css">
</head>
<body>
<div id="app"></div>

View File

@@ -145,14 +145,14 @@
<textarea v-if="editMsgMode" ref="mdTextarea" :rows="4"
class="block p-2.5 w-full text-sm text-gray-900 bg-gray-50 rounded-lg border border-gray-300 focus:ring-blue-500 focus:border-blue-500 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500"
:style="{ minHeight: mdRenderHeight + `px` }" placeholder="Enter message here..."
v-model="this.message.content"></textarea>
v-model="message.content"></textarea>
<div v-if="message.metadata !== null">
<div v-for="(metadata, index) in message.metadata" :key="'json-' + message.id + '-' + index" class="json font-bold">
<JsonViewer :jsonFormText="metadata.title" :jsonData="metadata.content" />
</div>
</div>
<DynamicUIRenderer v-if="message.ui!=null" class="w-full h-full" :code="message.ui"></DynamicUIRenderer>
<DynamicUIRenderer v-if="message.ui !== null" class="w-full h-full" :code="message_ui"></DynamicUIRenderer>
</div>
@@ -475,6 +475,11 @@ export default {
},
computed: {
message_ui:{
get(){
return this.message.ui
}
},
isTalking :{
get(){
return this.isSpeaking

View File

@@ -25,6 +25,7 @@ export const store = createStore({
bindingsArr:null,
modelsArr:null,
models_zoo:null,
selectedModel:null,
personalities:null,
diskUsage:null,
ramUsage:null,
@@ -54,6 +55,9 @@ export const store = createStore({
setModelsArr(state, modelsArr) {
state.modelsArr = modelsArr;
},
setselectedModel(state, selectedModel) {
state.selectedModel = selectedModel;
},
setDiskUsage(state, diskUsage) {
state.diskUsage = diskUsage;
},
@@ -200,7 +204,13 @@ export const store = createStore({
commit('setBindingsArr',bindingsArr)
},
async refreshModels({ commit }) {
let modelsArr = await api_get_req("list_models")
let modelsArr = await api_get_req("list_models");
let selectedModel = await api_get_req('get_active_model');
console.log("Active model " + JSON.stringify(selectedModel))
if(selectedModel!=undefined){
commit('setselectedModel',selectedModel["model"])
}
commit('setModelsArr',modelsArr)
},
async refreshExtensionsZoo({ commit }) {
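
refreshModels now also calls get_active_model and reads selectedModel["model"] from the reply, so that endpoint is expected to return a JSON object carrying the active model name under a "model" key. A rough, illustrative sketch of that contract is shown below; the real handler lives in app.py and is not part of this diff.

```python
# Illustrative Flask-style sketch of the response shape refreshModels expects
# from get_active_model; not the actual lollms-webui handler.
from flask import Flask, jsonify

app = Flask(__name__)
current_model_name = "my-model-GPTQ"  # placeholder for the currently mounted model

@app.route("/get_active_model", methods=["GET"])
def get_active_model():
    return jsonify({"model": current_model_name})
```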

View File

@@ -984,7 +984,8 @@ export default {
steps: [],
parameters: null,
metadata: []
metadata: [],
ui: null
};
this.createUserMsg(usrMessage);
@@ -1048,10 +1049,10 @@ export default {
console.log("JSON message")
console.log(msgObj.metadata)
messageItem.metadata = msgObj.metadata
} else if (msgObj.message_type == this.msgTypes.MSG_TYPE_JSON_UI) {
} else if (msgObj.message_type == this.msgTypes.MSG_TYPE_UI) {
console.log("UI message")
console.log(msgObj.ui)
messageItem.ui = msgObj.ui
messageItem.ui = msgObj.ui
console.log(messageItem.ui)
} else if (msgObj.message_type == this.msgTypes.MSG_TYPE_EXCEPTION) {
this.$refs.toast.showToast(msgObj.content, 5, false)
}

View File

@@ -307,8 +307,6 @@ export default {
isLesteningToVoice:false,
presets:[],
selectedPreset: '',
models:{},
selectedModel: '',
cursorPosition:0,
text:"",
pre_text:"",
@@ -324,6 +322,7 @@
silenceTimeout:5000
};
},
components:{
Toast,
MarkdownRenderer,
@@ -331,23 +330,6 @@
Card
},
mounted() {
axios.get('list_models').then(response => {
console.log("List models "+response.data)
this.models=response.data
axios.get('get_active_model').then(response => {
console.log("Active model " + JSON.stringify(response.data))
if(response.data!=undefined){
this.selectedModel = response.data["model"]
}
}).catch(ex=>{
this.$refs.toast.showToast(`Error: ${ex}`,4,false)
});
}).catch(ex=>{
this.$refs.toast.showToast(`Error: ${ex}`,4,false)
});
axios.get('./get_presets').then(response => {
console.log(response.data)
this.presets=response.data
@@ -428,6 +410,16 @@
},
computed: {
selectedModel: {
get(){
return this.$store.state.selectedModel;
}
},
models: {
get(){
return this.$store.state.modelsArr;
}
},
isTalking :{
get(){
return this.isSpeaking

View File

@@ -1,16 +1,16 @@
<template>
<div class="container overflow-y-scroll flex flex-col no-scrollbar shadow-lg p-10 pt-2 bg-bg-light-tone dark:bg-bg-dark-tone">
<div v-if="selectedModel.toLowerCase().includes('gptq')" class="container overflow-y-scroll flex flex-col no-scrollbar shadow-lg p-10 pt-2 bg-bg-light-tone dark:bg-bg-dark-tone">
<form @submit.prevent="submitForm" class="">
<Card title="Training configuration" :isHorizontal="true" :disableHoverAnimation="true" :disableFocus="true">
<Card title="Model" class="" :isHorizontal="false">
<!-- Model/Tokenizer -->
<div class="mb-4">
<label for="model_name" class="text-sm">Model Name:</label>
<ClipBoardTextInput id="model_path" inputType="text" :value="model_name" />
</div>
<div class="mb-4">
<label for="tokenizer_name" class="text-sm">Tokenizer Name:</label>
<ClipBoardTextInput id="model_path" inputType="text" :value="tokenizer_name" />
<select v-model="selectedModel" @change="setModel" class="bg-white dark:bg-black m-0 border-2 rounded-md shadow-sm w-full">
<option v-for="model in models" :key="model" :value="model">
{{ model }}
</option>
</select>
</div>
</Card>
<Card title="Data" :isHorizontal="false">
@@ -48,12 +48,17 @@
</Card>
</Card>
<Card :disableHoverAnimation="true" :disableFocus="true">
<button type="submit" class="bg-blue-500 text-white px-4 py-2 rounded">Train LLM</button>
<button class="bg-blue-500 text-white px-4 py-2 rounded">Start training</button>
<!-- <ProgressBar v-if="loading" :progress="progressValue" /> -->
</Card>
</form>
</div>
<div v-else>
<Card title="Info" class="" :isHorizontal="false">
Only GPTQ models are supported for QLoRA fine-tuning. Please select a GPTQ-compatible model.
</Card>
</div>
</template>
<script>
@@ -69,8 +74,6 @@ import axios from "axios";
},
data() {
return {
model_name: 'jondurbin/airoboros-7b-gpt4',
tokenizer_name: 'jondurbin/airoboros-7b-gpt4',
dataset_path: '',
max_length: 1024,
batch_size: 4,
@@ -83,8 +86,7 @@ import axios from "axios";
methods: {
submitForm() {
const formData = {
model_name: this.model_name,
tokenizer_name: this.tokenizer_name,
model_name: this.selectedModel,
dataset_file: this.selectedDataset,
max_length: this.max_length,
batch_size: this.batch_size,
@@ -114,6 +116,18 @@
}
},
},
computed:{
selectedModel: {
get(){
return this.$store.state.selectedModel;
}
},
models: {
get(){
return this.$store.state.modelsArr;
}
},
},
watch: {
model_name(newVal) {
// Watch for changes to model_name and propagate them to the child component