Mirror of https://github.com/ParisNeo/lollms-webui.git (synced 2024-12-20 04:47:55 +00:00)

Commit 3c7c1953d4 ("sync"), parent 7401c201b0.
@@ -46,7 +46,7 @@ class LollmsClient {
         this.host_address = host_address;
         this.model_name = model_name;
         this.ctx_size = ctx_size;
-        this.n_predict = n_predict;
+        this.n_predict = n_predict?n_predict:4096;
         this.personality = personality;
         this.temperature = temperature;
         this.top_k = top_k;
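Note on the new default above: `n_predict?n_predict:4096` is a truthiness check, so an explicit `0` is also replaced by 4096, not only `null`/`undefined`. A minimal sketch of the distinction (the `??` line is a hypothetical alternative, not part of this commit):

    // Truthiness fallback, as committed: 0, null and undefined all become 4096.
    const a = 0 ? 0 : 4096;   // 4096
    // Nullish coalescing would keep an explicit 0 and replace only null/undefined.
    const b = 0 ?? 4096;      // 0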
@@ -118,7 +118,7 @@ class LollmsClient {
         const output = await axios.post("/lollms_detokenize", {"tokens": tokensList});
         console.log(output.data.text)
         return output.data.text
     }
     generate(prompt, {
         n_predict = null,
         stream = false,
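For reference, the detokenize helper shown in context above posts a token list and gets plain text back. A round-trip sketch, assuming a matching `/lollms_tokenize` endpoint and response shape (neither is shown in this diff):

    const t = await axios.post("/lollms_tokenize", { prompt: "Hello" });      // assumed endpoint/shape
    const d = await axios.post("/lollms_detokenize", { tokens: t.data.tokens });
    console.log(d.data.text);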
@@ -145,6 +145,26 @@ class LollmsClient {
                 throw new Error('Invalid generation mode');
         }
     }
+    generate_with_images(prompt, images, {
+        n_predict = null,
+        stream = false,
+        temperature = 0.1,
+        top_k = 50,
+        top_p = 0.95,
+        repeat_penalty = 0.8,
+        repeat_last_n = 40,
+        seed = null,
+        n_threads = 8,
+        service_key = "",
+        streamingCallback = null
+    } = {}) {
+        switch (this.default_generation_mode) {
+            case ELF_GENERATION_FORMAT.LOLLMS:
+                return this.lollms_generate_with_images(prompt, images, this.host_address, this.model_name, -1, n_predict, stream, temperature, top_k, top_p, repeat_penalty, repeat_last_n, seed, n_threads, service_key, streamingCallback);
+            default:
+                throw new Error('Invalid generation mode');
+        }
+    }
     async generateText(prompt, options = {}) {
         // Destructure with default values from `this` if not provided in `options`
         const {
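The hunk above adds a `generate_with_images` dispatcher that mirrors `generate`. A minimal usage sketch, assuming a constructed client whose `default_generation_mode` is `ELF_GENERATION_FORMAT.LOLLMS`, and assuming the constructor takes the host address first; the image payload format is not shown in this diff:

    const client = new LollmsClient("http://localhost:9600");   // constructor args assumed
    const reply = await client.generate_with_images(
        "Describe this picture.",
        [imageData],   // hypothetical image payload, e.g. a base64 string
        { n_predict: 512, stream: false }
    );
    console.log(reply);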
@@ -203,12 +223,13 @@ class LollmsClient {
         } : {
             'Content-Type': 'application/json',
         };
+        console.log("n_predict:",n_predict)
+        console.log("self.n_predict:",this.n_predict)
         const data = JSON.stringify({
             prompt: prompt,
             model_name: model_name,
             personality: personality,
-            n_predict: n_predict?n_predict:self.n_predict,
+            n_predict: n_predict?n_predict:this.n_predict,
             stream: stream,
             temperature: temperature,
             top_k: top_k,
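The change above fixes a scoping bug: JavaScript class methods have no implicit `self` binding, so `self.n_predict` silently reads `undefined` in browsers (where `self` aliases `window`) and throws a ReferenceError in Node. A sketch of the failure mode:

    class C {
        constructor() { this.n_predict = 4096; }
        bad()  { return typeof self !== "undefined" ? self.n_predict : undefined; } // undefined (or ReferenceError)
        good() { return this.n_predict; }                                           // 4096
    }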
@@ -240,7 +261,59 @@ class LollmsClient {
             return null;
         }
     }
+    async lollms_generate_with_images(prompt, images, host_address = this.host_address, model_name = this.model_name, personality = this.personality, n_predict = this.n_predict, stream = false, temperature = this.temperature, top_k = this.top_k, top_p = this.top_p, repeat_penalty = this.repeat_penalty, repeat_last_n = this.repeat_last_n, seed = this.seed, n_threads = this.n_threads, service_key = this.service_key, streamingCallback = null) {
+        let url;
+        if(host_address!=null){
+            url = `${host_address}/lollms_generate_with_images`;
+        }
+        else{
+            url = `/lollms_generate_with_images`;
+        }
+        const headers = service_key !== "" ? {
+            'Content-Type': 'application/json; charset=utf-8',
+            'Authorization': `Bearer ${service_key}`,
+        } : {
+            'Content-Type': 'application/json',
+        };
+        console.log("n_predict:",n_predict)
+        console.log("self.n_predict:",this.n_predict)
+        const data = JSON.stringify({
+            prompt: prompt,
+            images: images,
+            model_name: model_name,
+            personality: personality,
+            n_predict: n_predict?n_predict:this.n_predict,
+            stream: stream,
+            temperature: temperature,
+            top_k: top_k,
+            top_p: top_p,
+            repeat_penalty: repeat_penalty,
+            repeat_last_n: repeat_last_n,
+            seed: seed,
+            n_threads: n_threads
+        });
+
+        try {
+            const response = await fetch(url, {
+                method: 'POST',
+                headers: headers,
+                body: data
+            });
+
+            // Check if the response is okay
+            if (!response.ok) {
+                throw new Error('Network response was not ok ' + response.statusText);
+            }
+
+            // Read the response as plaintext
+            const responseBody = await response.text();
+            console.log(responseBody)
+            return responseBody;
+        } catch (error) {
+            console.error(error);
+            return null;
+        }
+    }
+
+
     async openai_generate(prompt, host_address = this.host_address, model_name = this.model_name, personality = this.personality, n_predict = this.n_predict, stream = false, temperature = this.temperature, top_k = this.top_k, top_p = this.top_p, repeat_penalty = this.repeat_penalty, repeat_last_n = this.repeat_last_n, seed = this.seed, n_threads = this.n_threads, ELF_COMPLETION_FORMAT = "vllm instruct", service_key = this.service_key, streamingCallback = null) {
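As added, `lollms_generate_with_images` always reads the whole body with `response.text()`, so the `stream` and `streamingCallback` parameters are accepted but not yet acted on. A minimal direct-call sketch, assuming `client` is a constructed LollmsClient; the image value is a hypothetical placeholder whose exact encoding this diff does not specify:

    const text = await client.lollms_generate_with_images(
        "What is in this image?",
        ["<base64-encoded image>"],   // hypothetical payload format
        "http://localhost:9600"
    );
    if (text !== null) console.log(text);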
@@ -1 +1 @@
-Subproject commit 3ddec9895d34cd66e25b09cdb15cd03e651c6a22
+Subproject commit 716b9fa86cd46375f23b9b3ac3ecf74ecc617356