Commit 2b959c7b76 ("more information"), parent 0948f2ca6e
Mirror of https://github.com/ParisNeo/lollms-webui.git, synced 2024-12-18 20:17:50 +00:00
@@ -1 +1 @@
-Subproject commit 27748309141a73db171a1015b499914cc6fce9ec
+Subproject commit 10dacbbb47665b1c8179d3800be3918a2e80710d
@@ -770,6 +770,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
binding = self.config["binding_name"],
model = self.config["model_name"],
personality = self.config["personalities"][self.config["active_personality_id"]],
created_at = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
) # first the content is empty, but we'll fill it at the end
run_async(partial(
self.sio.emit,'new_message',
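
The hunk above stamps the freshly created message with a wall-clock creation time before it is announced over the 'new_message' event. A minimal sketch of that pattern in Python, assuming a hypothetical new_message_fields helper (only the field names and the timestamp format are taken from the diff):

from datetime import datetime

TS_FMT = '%Y-%m-%d %H:%M:%S'  # timestamp format used throughout the diff

def new_message_fields(config: dict) -> dict:
    # Hypothetical helper: gather the fields stamped on a new message.
    return dict(
        binding=config["binding_name"],
        model=config["model_name"],
        personality=config["personalities"][config["active_personality_id"]],
        created_at=datetime.now().strftime(TS_FMT),  # creation time of the message row
    )
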
@@ -819,6 +820,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
'ui': ui,
'discussion_id':client.discussion.discussion_id,
'message_type': MSG_TYPE.MSG_TYPE_STEP_END.value,
'created_at':client.discussion.current_message.created_at,
'started_generating_at': client.discussion.current_message.started_generating_at,
'finished_generating_at': client.discussion.current_message.finished_generating_at,
'nb_tokens': client.discussion.current_message.nb_tokens,
@@ -836,6 +838,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
'ui': ui,
'discussion_id':client.discussion.discussion_id,
'message_type': msg_type.value if msg_type is not None else MSG_TYPE.MSG_TYPE_CHUNK.value if self.nb_received_tokens>1 else MSG_TYPE.MSG_TYPE_FULL.value,
'created_at':client.discussion.current_message.created_at,
'started_generating_at': client.discussion.current_message.started_generating_at,
'finished_generating_at': client.discussion.current_message.finished_generating_at,
'nb_tokens': client.discussion.current_message.nb_tokens,
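
Both hunks above attach the same timing metadata (created_at, started_generating_at, finished_generating_at, nb_tokens) to the payloads emitted to the browser, which is what lets the frontend compute warmup and generation durations. The chained conditional on the 'message_type' line groups right to left; here is a sketch of the same selection logic in Python, with a stand-in enum whose member values are placeholders, not the real ones:

from enum import Enum

class MSG_TYPE(Enum):           # stand-in for the enum used in the diff
    MSG_TYPE_CHUNK = 1          # placeholder values
    MSG_TYPE_FULL = 2
    MSG_TYPE_STEP_END = 3
    MSG_TYPE_INFO = 4

def resolve_msg_type(msg_type, nb_received_tokens: int) -> int:
    # An explicit type wins; otherwise stream as CHUNK once more than one
    # token has been received, and as FULL for the very first emission.
    if msg_type is not None:
        return msg_type.value
    if nb_received_tokens > 1:
        return MSG_TYPE.MSG_TYPE_CHUNK.value
    return MSG_TYPE.MSG_TYPE_FULL.value
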
@@ -845,7 +848,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
)
)
if msg_type != MSG_TYPE.MSG_TYPE_INFO:
-client.discussion.update_message(client.generated_text, new_metadata=mtdt, new_ui=ui, nb_tokens=client.discussion.current_message.nb_tokens)
+client.discussion.update_message(client.generated_text, new_metadata=mtdt, new_ui=ui, started_generating_at=client.discussion.current_message.started_generating_at, nb_tokens=client.discussion.current_message.nb_tokens)
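
The changed call above forwards started_generating_at when the generated text is persisted, so the stored message keeps its start timestamp alongside nb_tokens. As a rough, hypothetical outline of how the three timestamps relate to a streaming generation loop (the function and parameter names here are placeholders, not the project's API):

from datetime import datetime

TS_FMT = '%Y-%m-%d %H:%M:%S'

def run_generation(prompt, generate_stream, persist):
    # created_at is stamped earlier, when the message row is created (see the first hunk).
    started_generating_at = datetime.now().strftime(TS_FMT)   # just before the first token
    text, nb_tokens = "", 0
    for chunk in generate_stream(prompt):                      # placeholder streaming callback
        text += chunk
        nb_tokens += 1
    finished_generating_at = datetime.now().strftime(TS_FMT)   # after the last token
    persist(text,
            started_generating_at=started_generating_at,
            finished_generating_at=finished_generating_at,
            nb_tokens=nb_tokens)
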
File diff suppressed because one or more lines are too long
web/dist/index.html (vendored, 2 changes)
@@ -6,7 +6,7 @@

<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LoLLMS WebUI - Welcome</title>
-<script type="module" crossorigin src="/assets/index-80bb9b98.js"></script>
+<script type="module" crossorigin src="/assets/index-1841272f.js"></script>
<link rel="stylesheet" href="/assets/index-49ced084.css">
</head>
<body>
@@ -228,11 +228,11 @@
<p v-if="message.seed">Seed: <span class="font-thin">{{ message.seed }}</span></p>
<p v-if="message.nb_tokens">Number of tokens: <span class="font-thin"
    :title="'Number of Tokens: ' + message.nb_tokens">{{ message.nb_tokens }}</span></p>
<p v-if="wait_duration">Wait duration: <span class="font-thin"
    :title="'Wait duration: ' + wait_duration">{{ wait_duration }}</span></p>
<p v-if="warmup_duration">Warmup duration: <span class="font-thin"
    :title="'Warmup duration: ' + warmup_duration">{{ warmup_duration }}</span></p>
<p v-if="time_spent">Generation duration: <span class="font-thin"
    :title="'Finished generating: ' + time_spent">{{ time_spent }}</span></p>
-<p v-if="time_spent">Rate: <span class="font-thin"
+<p v-if="generation_rate">Rate: <span class="font-thin"
    :title="'Generation rate: ' + generation_rate">{{ generation_rate }}</span></p>
</div>
@@ -800,6 +800,8 @@ export default {
time_spent() {
const startTime = new Date(Date.parse(this.message.started_generating_at))
const endTime = new Date(Date.parse(this.message.finished_generating_at))
console.log("Computing the generation duration, ", startTime," -> ", endTime)

//const spentTime = new Date(endTime - startTime)
const same = endTime.getTime() === startTime.getTime();
if (same) {
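
time_spent here is the wall-clock span between started_generating_at and finished_generating_at, with guards for identical or unparsable timestamps; warmup_duration in the next hunk applies the same arithmetic to created_at and started_generating_at (and now returns 0 rather than undefined for a zero-length warmup). The component does this in JavaScript; a compact sketch of the same computation in Python, assuming the 'YYYY-MM-DD HH:MM:SS' format used by the backend hunks:

from datetime import datetime
from typing import Optional

TS_FMT = '%Y-%m-%d %H:%M:%S'

def elapsed_seconds(start: Optional[str], end: Optional[str]) -> Optional[float]:
    # Seconds between two timestamps, or None when they are missing,
    # unparsable, or identical (nothing meaningful to display).
    try:
        t0 = datetime.strptime(start, TS_FMT)
        t1 = datetime.strptime(end, TS_FMT)
    except (TypeError, ValueError):
        return None
    if t1 == t0:
        return None
    return (t1 - t0).total_seconds()

# time_spent      ~ elapsed_seconds(started_generating_at, finished_generating_at)
# warmup_duration ~ elapsed_seconds(created_at, started_generating_at)
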
@@ -830,13 +832,14 @@ export default {


},
-wait_duration() {
+warmup_duration() {
const createdTime = new Date(Date.parse(this.message.created_at))
-const endTime = new Date(Date.parse(this.message.finished_generating_at))
+const endTime = new Date(Date.parse(this.message.started_generating_at))
console.log("Computing the warmup duration, ",createdTime," -> ", endTime)
//const spentTime = new Date(endTime - startTime)
const same = endTime.getTime() === createdTime.getTime();
if (same) {
-return undefined
+return 0
}

if (!createdTime.getTime() || !endTime.getTime()) {
@@ -868,6 +871,7 @@ export default {
const startTime = new Date(Date.parse(this.message.started_generating_at))
const endTime = new Date(Date.parse(this.message.finished_generating_at))
const nb_tokens = this.message.nb_tokens
console.log("Computing the generation rate, ", nb_tokens, " in ", startTime," -> ", endTime)
//const spentTime = new Date(endTime - startTime)
const same = endTime.getTime() === startTime.getTime();
if (same) {
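
This last hunk belongs to the generation_rate computed property, which divides nb_tokens by the time spent generating. The same idea as a self-contained Python sketch (the function name and the returned unit, tokens per second, are assumptions for illustration):

from datetime import datetime
from typing import Optional

TS_FMT = '%Y-%m-%d %H:%M:%S'

def generation_rate(nb_tokens: Optional[int],
                    started: Optional[str],
                    finished: Optional[str]) -> Optional[float]:
    # Tokens per second over the generation window; None when it cannot be computed.
    try:
        seconds = (datetime.strptime(finished, TS_FMT)
                   - datetime.strptime(started, TS_FMT)).total_seconds()
    except (TypeError, ValueError):
        return None
    if seconds <= 0 or not nb_tokens:
        return None
    return nb_tokens / seconds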