This commit is contained in:
ParisNeo 2023-05-11 22:27:23 +02:00
parent 325cd5b627
commit 59f9413253
4 changed files with 150 additions and 36 deletions

16
app.py
View File

@ -230,27 +230,33 @@ class Gpt4AllWebUI(GPT4AllAPI):
# Open the download as a stream and size it from the Content-Length header.
# NOTE(review): Content-Length may be absent on some servers → int(None)
# raises TypeError; cannot confirm from this fragment.
response = requests.get(model_path, stream=True)
file_size = int(response.headers.get('Content-Length'))
downloaded_size = 0
# NOTE(review): diff residue — both the old (4096) and the new (8192) values
# of CHUNK_SIZE appear here; only the second assignment takes effect.
CHUNK_SIZE = 4096
CHUNK_SIZE = 8192
import threading

# One lock shared by every download worker: the file object is shared between
# threads, and an unsynchronized seek()+write() pair from two threads can
# interleave and corrupt each other's regions. All positioned writes are
# serialized through this lock.
_chunk_write_lock = threading.Lock()

def download_chunk(url, start_byte, end_byte, fileobj):
    """Fetch bytes [start_byte, end_byte] of *url* via an HTTP Range request
    and write them into *fileobj* at their absolute offsets.

    Returns the number of bytes written (may be less than the requested
    range if the connection drops).
    """
    headers = {'Range': f'bytes={start_byte}-{end_byte}'}
    response = requests.get(url, headers=headers, stream=True)
    downloaded_bytes = 0
    for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
        if chunk:
            # Fix: the original did a bare seek+write on the shared file
            # object — racy across threads. Hold the lock for the pair.
            with _chunk_write_lock:
                fileobj.seek(start_byte)
                fileobj.write(chunk)
            downloaded_bytes += len(chunk)
            start_byte += len(chunk)
    return downloaded_bytes
def download_file(url, file_path, num_threads=4):
    """Download *url* to *file_path* using *num_threads* parallel ranged requests.

    Each worker fetches one byte range and writes it at its absolute offset
    in the shared file object. Progress is shown on a tqdm bar and emitted
    over socketio as cumulative downloaded bytes.
    """
    response = requests.head(url)
    file_size = int(response.headers.get('Content-Length'))
    chunk_size = file_size // num_threads
    progress = 0
    with open(file_path, 'wb') as fileobj:
        # Pre-size the file so every worker can write at its own offset.
        fileobj.truncate(file_size)
        with tqdm(total=file_size, unit='B', unit_scale=True, unit_divisor=1024) as pbar:
            with ThreadPoolExecutor(max_workers=num_threads) as executor:
                futures = []
                for i in range(num_threads):
                    start_byte = i * chunk_size
                    # The last worker takes the remainder so the fractional
                    # tail (file_size % num_threads bytes) is not dropped.
                    if i == num_threads - 1:
                        end_byte = file_size - 1
                    else:
                        end_byte = start_byte + chunk_size - 1
                    futures.append(executor.submit(download_chunk, url, start_byte, end_byte, fileobj))
                for future in as_completed(futures):
                    downloaded_bytes = future.result()
                    progress += downloaded_bytes
                    pbar.update(downloaded_bytes)
                    # NOTE(review): 'progress' is cumulative BYTES, not a
                    # percentage — confirm the frontend expects bytes.
                    socketio.emit('install_progress', {'status': 'progress', 'progress': progress})

# Usage example
download_file(model_path, installation_path, num_threads=4)

View File

@ -53,6 +53,7 @@ export default {
isInstalled: Boolean,
onInstall: Function,
onUninstall: Function,
onSelected: Function,
selected: Boolean
},
data() {
@ -75,7 +76,8 @@ export default {
},
handleSelection() {
if (this.isInstalled && !this.selected) {
this.$emit('update:selected', true);
this.selected=true;
onSelected(this);
}
}
}

View File

@ -52,17 +52,21 @@
</select>
</div>
<div class="m-2">
<label for="model" class="block mb-2 text-sm font-medium text-gray-900 dark:text-white">
Model:
</label>
<select id="model"
class="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500">
</div>
</div>
<div
class="flex flex-col mb-2 p-3 rounded-lg bg-bg-light-tone dark:bg-bg-dark-tone hover:bg-bg-light-tone-panel hover:dark:bg-bg-dark-tone-panel duration-150 shadow-lg">
<div class="flex flex-row ">
<button @click.stop="bec_collapsed = !bec_collapsed"
class="text-2xl hover:text-primary duration-75 p-2 -m-2 w-full text-left active:translate-y-1">
<!-- <i data-feather="chevron-right"></i> -->
<option v-for="item in modelsArr" :selected="item === configFile.model">{{ item }}</option>
</select>
</div>
<h3 class="text-lg font-semibold cursor-pointer select-none "
@click.stop="bec_collapsed = !bec_collapsed">
Models zoo</h3>
</button>
</div>
<div :class="{ 'hidden': bec_collapsed }" class="flex flex-col mb-2 p-2">
<div v-if="models.length > 0" class="my-2">
<label for="model" class="block ml-2 mb-2 text-sm font-medium text-gray-900 dark:text-white">
Install more models:
@ -71,12 +75,12 @@
<model-entry v-for="(model, index) in models" :key="index" :title="model.title" :icon="model.icon"
:path="model.path" :description="model.description" :is-installed="model.isInstalled"
:on-install="onInstall"
:on-uninstall="onUninstall" />
:on-uninstall="onUninstall"
:on-selected="onSelected" />
</div>
</div>
</div>
</div>
<!-- PERSONALITY -->
<div
class="flex flex-col mb-2 p-3 rounded-lg bg-bg-light-tone dark:bg-bg-dark-tone hover:bg-bg-light-tone-panel hover:dark:bg-bg-dark-tone-panel duration-150 shadow-lg">
@ -321,7 +325,6 @@ export default {
mc_collapsed: false,
// Settings stuff
backendsArr: [],
modelsArr: [],
persLangArr: [],
persCatgArr: [],
persArr: [],
@ -343,6 +346,9 @@ export default {
console.log(error);
});
},
// Invoked by a ModelEntry card when the user selects an installed model:
// persists the chosen model title into the backend configuration.
// NOTE(review): bare `update_setting(...)` — if this is a component method
// it must be called as `this.update_setting(...)`; verify it resolves.
onSelected(model_object){
update_setting('model', model_object.title)
},
// Model installation
onInstall(model_object) {
let isInstalled = model_object.isInstalled
@ -354,7 +360,7 @@ export default {
// Socket listener tracking 'install_progress' events for a model install.
const progressListener = (response) => {
if (response.status === 'progress') {
// NOTE(review): residual bug — `message` is undefined in this scope; the
// progress branch should read `response.progress` like the branches below.
this.progress = message.progress;
} else if (message.status === 'succeeded') {
} else if (response.status === 'succeeded') {
// Installation completed
model_object.installing = false;
this.showProgress = false;
@ -363,7 +369,7 @@ export default {
this.models[index].isInstalled = true;
this.socket.off('install_progress', progressListener);
} else if (message.status === 'failed') {
} else if (response.status === 'failed') {
@ -381,22 +387,21 @@ export default {
// Socket listener tracking 'install_progress' events for a model uninstall.
// NOTE(review): this span interleaves removed and added diff lines; comments
// below flag bugs that persist in the ADDED (new) code.
const progressListener = (response) => {
if (response.status === 'progress') {
// NOTE(review): residual bug — `message` is undefined here; should be
// `response.progress`.
this.progress = message.progress;
} else if (message.status === 'succeeded') {
// Installation completed
model_object.uninstalling = false;
} else if (response.status === 'succeeded') {
// Installation completed
model_object.uninstalling = false;
this.showProgress = false;
// Update the isInstalled property of the corresponding model
const index = this.models.findIndex((model) => model.path === path);
this.models[index].isInstalled = false;
this.showProgress = false;
// Update the isInstalled property of the corresponding model
model_object.isInstalled = false;
this.socket.off('install_progress', progressListener);
} else if (message.status === 'failed') {
// Installation failed or encountered an error
model_object.uninstalling = false;
this.showProgress = false;
this.socket.off('install_progress', progressListener);
console.error('Installation failed:', message.error);
this.socket.off('install_progress', progressListener);
} else if (response.status === 'failed') {
// Installation failed or encountered an error
model_object.uninstalling = false;
this.showProgress = false;
this.socket.off('install_progress', progressListener);
// NOTE(review): residual bug in the NEW branch — `message.error` should
// be `response.error` (message is undefined).
console.error('Installation failed:', message.error);
}
};
this.socket.on('install_progress', progressListener);
@ -417,7 +422,19 @@ export default {
//this.api_get_req("list_personalities_categories").then(response => { this.persCatgArr = response })
//this.api_get_req("list_personalities").then(response => { this.persArr = response })
//this.api_get_req("list_languages").then(response => { this.langArr = response })
this.api_get_req("get_config").then(response => { this.configFile = response })
this.api_get_req("get_config").then(response => {
this.configFile = response
console.log("selecting model")
self.models.forEach(model => {
console.log(`${model} -> ${response["model"]}`)
if(model.title==response["model"]){
model.selected=true;
}
else{
model.selected=false;
}
});
})
},
// Accordeon stuff
toggleAccordion() {
@ -516,7 +533,6 @@ export default {
this.configFile = await this.api_get_req("get_config")
this.backendsArr = await this.api_get_req("list_backends")
this.modelsArr = await this.api_get_req("list_models")
this.persLangArr = await this.api_get_req("list_personalities_languages")
this.persCatgArr = await this.api_get_req("list_personalities_categories")
this.persArr = await this.api_get_req("list_personalities")

View File

@ -0,0 +1,88 @@
[
{
"md5sum": "81a09a0ddf89690372fc296ff7f625af",
"filename": "ggml-gpt4all-j-v1.3-groovy.bin",
"filesize": "3785248281",
"isDefault": "true",
"bestGPTJ": "true",
"description": "Current best commercially licensable model based on GPT-J and trained by Nomic AI on the latest curated GPT4All dataset."
},
{
"md5sum": "91f886b68fbce697e9a3cd501951e455",
"filename": "ggml-gpt4all-l13b-snoozy.bin",
"filesize": "8136770688",
"bestLlama": "true",
"description": "Current best non-commercially licensable model based on Llama 13b and trained by Nomic AI on the latest curated GPT4All dataset."
},
{
"md5sum": "756249d3d6abe23bde3b1ae272628640",
"filename": "ggml-mpt-7b-chat.bin",
"filesize": "4854401050",
"isDefault": "true",
"bestMPT": "true",
"requires": "2.4.1",
"description": "Current best non-commercially licensable chat model based on MPT and trained by Mosaic ML."
},
{
"md5sum": "879344aaa9d62fdccbda0be7a09e7976",
"filename": "ggml-gpt4all-j-v1.2-jazzy.bin",
"filesize": "3785248281",
"description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v2 GPT4All dataset."
},
{
"md5sum": "61d48a82cb188cceb14ebb8082bfec37",
"filename": "ggml-gpt4all-j-v1.1-breezy.bin",
"filesize": "3785248281",
"description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v1 GPT4All dataset."
},
{
"md5sum": "5b5a3f9b858d33b29b52b89692415595",
"filename": "ggml-gpt4all-j.bin",
"filesize": "3785248281",
"description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v0 GPT4All dataset."
},
{
"md5sum": "29119f8fa11712704c6b22ac5ab792ea",
"filename": "ggml-vicuna-7b-1.1-q4_2.bin",
"filesize": "4212859520",
"description": "A non-commercially licensable model based on Llama 7b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego."
},
{
"md5sum": "95999b7b0699e2070af63bf5d34101a8",
"filename": "ggml-vicuna-13b-1.1-q4_2.bin",
"filesize": "8136770688",
"description": "A non-commercially licensable model based on Llama 13b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego."
},
{
"md5sum": "99e6d129745a3f1fb1121abed747b05a",
"filename": "ggml-wizardLM-7B.q4_2.bin",
"filesize": "4212864640",
"description": "A non-commercially licensable model based on Llama 7b and trained by Microsoft and Peking University."
},
{
"md5sum": "6cb4ee297537c9133bddab9692879de0",
"filename": "ggml-stable-vicuna-13B.q4_2.bin",
"filesize": "8136777088",
"description": "A non-commercially licensable model based on Llama 13b and RLHF trained by Stable AI."
},
{
"md5sum": "120c32a51d020066288df045ef5d52b9",
"filename": "ggml-mpt-7b-base.bin",
"filesize": "4854401028",
"requires": "2.4.1",
"description": "A commercially licensable model base pre-trained by Mosaic ML."
},
{
"md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe",
"filename": "ggml-nous-gpt4-vicuna-13b.bin",
"filesize": "8136777088",
"description": "A non-commercially licensable model based on Vicuna 13b, fine-tuned on ~180,000 instructions, trained by Nous Research."
},
{
"md5sum": "1cfa4958f489f0a0d1ffdf6b37322809",
"filename": "ggml-mpt-7b-instruct.bin",
"filesize": "4854401028",
"requires": "2.4.1",
"description": "A commercially licensable instruct model based on MPT and trained by Mosaic ML."
}
]