From 59f9413253d04a6bad69e12096469d3923a2f303 Mon Sep 17 00:00:00 2001 From: ParisNeo Date: Thu, 11 May 2023 22:27:23 +0200 Subject: [PATCH] upgraded --- app.py | 16 ++++-- web/src/components/ModelEntry.vue | 4 +- web/src/views/SettingsView.vue | 78 ++++++++++++++++----------- zoos/gpt4all_models_zoo.json | 88 +++++++++++++++++++++++++++++++ 4 files changed, 150 insertions(+), 36 deletions(-) create mode 100644 zoos/gpt4all_models_zoo.json diff --git a/app.py b/app.py index 2b79f658..e6cc8458 100644 --- a/app.py +++ b/app.py @@ -230,27 +230,33 @@ class Gpt4AllWebUI(GPT4AllAPI): response = requests.get(model_path, stream=True) file_size = int(response.headers.get('Content-Length')) downloaded_size = 0 - CHUNK_SIZE = 4096 + CHUNK_SIZE = 8192 def download_chunk(url, start_byte, end_byte, fileobj): headers = {'Range': f'bytes={start_byte}-{end_byte}'} response = requests.get(url, headers=headers, stream=True) + downloaded_bytes = 0 for chunk in response.iter_content(chunk_size=CHUNK_SIZE): if chunk: fileobj.seek(start_byte) fileobj.write(chunk) + downloaded_bytes += len(chunk) start_byte += len(chunk) + return downloaded_bytes + + def download_file(url, file_path, num_threads=4): response = requests.head(url) file_size = int(response.headers.get('Content-Length')) + chunk_size = file_size // num_threads + progress = 0 with open(file_path, 'wb') as fileobj: with tqdm(total=file_size, unit='B', unit_scale=True, unit_divisor=1024) as pbar: with ThreadPoolExecutor(max_workers=num_threads) as executor: futures = [] - chunk_size = file_size // num_threads for i in range(num_threads): start_byte = i * chunk_size @@ -258,8 +264,10 @@ class Gpt4AllWebUI(GPT4AllAPI): futures.append(executor.submit(download_chunk, url, start_byte, end_byte, fileobj)) for future in tqdm(as_completed(futures), total=num_threads): - future.result() - pbar.update(chunk_size) + downloaded_bytes = future.result() + progress += downloaded_bytes + pbar.update(downloaded_bytes) + 
socketio.emit('install_progress', {'status': 'progress', 'progress': progress}) # Usage example download_file(model_path, installation_path, num_threads=4) diff --git a/web/src/components/ModelEntry.vue b/web/src/components/ModelEntry.vue index a56ad71f..4d9ef3c4 100644 --- a/web/src/components/ModelEntry.vue +++ b/web/src/components/ModelEntry.vue @@ -53,6 +53,7 @@ export default { isInstalled: Boolean, onInstall: Function, onUninstall: Function, + onSelected: Function, selected: Boolean }, data() { @@ -75,7 +76,8 @@ export default { }, handleSelection() { if (this.isInstalled && !this.selected) { - this.$emit('update:selected', true); + this.selected=true; + onSelected(this); } } } diff --git a/web/src/views/SettingsView.vue b/web/src/views/SettingsView.vue index 52acea22..84a8d9b8 100644 --- a/web/src/views/SettingsView.vue +++ b/web/src/views/SettingsView.vue @@ -52,17 +52,21 @@ -
- - -
+

+ Models zoo

+ + +
-
@@ -321,7 +325,6 @@ export default { mc_collapsed: false, // Settings stuff backendsArr: [], - modelsArr: [], persLangArr: [], persCatgArr: [], persArr: [], @@ -343,6 +346,9 @@ export default { console.log(error); }); }, + onSelected(model_object){ + update_setting('model', model_object.title) + }, // Model installation onInstall(model_object) { let isInstalled = model_object.isInstalled @@ -354,7 +360,7 @@ export default { const progressListener = (response) => { if (response.status === 'progress') { this.progress = message.progress; - } else if (message.status === 'succeeded') { + } else if (response.status === 'succeeded') { // Installation completed model_object.installing = false; this.showProgress = false; @@ -363,7 +369,7 @@ export default { this.models[index].isInstalled = true; this.socket.off('install_progress', progressListener); - } else if (message.status === 'failed') { + } else if (response.status === 'failed') { // Installation failed or encountered an error model_object.installing = false; this.showProgress = false; @@ -381,22 +387,21 @@ export default { const progressListener = (response) => { if (response.status === 'progress') { this.progress = message.progress; - } else if (message.status === 'succeeded') { - // Installation completed - model_object.uninstalling = false; + } else if (response.status === 'succeeded') { + // Installation completed + model_object.uninstalling = false; - this.showProgress = false; - // Update the isInstalled property of the corresponding model - const index = this.models.findIndex((model) => model.path === path); - this.models[index].isInstalled = false; + this.showProgress = false; + // Update the isInstalled property of the corresponding model + model_object.isInstalled = false; - this.socket.off('install_progress', progressListener); - } else if (message.status === 'failed') { - // Installation failed or encountered an error - model_object.uninstalling = false; - this.showProgress = false; - 
this.socket.off('install_progress', progressListener); - console.error('Installation failed:', message.error); + this.socket.off('install_progress', progressListener); + } else if (response.status === 'failed') { + // Installation failed or encountered an error + model_object.uninstalling = false; + this.showProgress = false; + this.socket.off('install_progress', progressListener); + console.error('Installation failed:', message.error); } }; this.socket.on('install_progress', progressListener); @@ -417,7 +422,19 @@ export default { //this.api_get_req("list_personalities_categories").then(response => { this.persCatgArr = response }) //this.api_get_req("list_personalities").then(response => { this.persArr = response }) //this.api_get_req("list_languages").then(response => { this.langArr = response }) - this.api_get_req("get_config").then(response => { this.configFile = response }) + this.api_get_req("get_config").then(response => { + this.configFile = response + console.log("selecting model") + self.models.forEach(model => { + console.log(`${model} -> ${response["model"]}`) + if(model.title==response["model"]){ + model.selected=true; + } + else{ + model.selected=false; + } + }); + }) }, // Accordeon stuff toggleAccordion() { @@ -516,7 +533,6 @@ export default { this.configFile = await this.api_get_req("get_config") this.backendsArr = await this.api_get_req("list_backends") - this.modelsArr = await this.api_get_req("list_models") this.persLangArr = await this.api_get_req("list_personalities_languages") this.persCatgArr = await this.api_get_req("list_personalities_categories") this.persArr = await this.api_get_req("list_personalities") diff --git a/zoos/gpt4all_models_zoo.json b/zoos/gpt4all_models_zoo.json new file mode 100644 index 00000000..bfaa5bda --- /dev/null +++ b/zoos/gpt4all_models_zoo.json @@ -0,0 +1,88 @@ +[ + { + "md5sum": "81a09a0ddf89690372fc296ff7f625af", + "filename": "ggml-gpt4all-j-v1.3-groovy.bin", + "filesize": "3785248281", + "isDefault": "true", 
+ "bestGPTJ": "true", + "description": "Current best commercially licensable model based on GPT-J and trained by Nomic AI on the latest curated GPT4All dataset." + }, + { + "md5sum": "91f886b68fbce697e9a3cd501951e455", + "filename": "ggml-gpt4all-l13b-snoozy.bin", + "filesize": "8136770688", + "bestLlama": "true", + "description": "Current best non-commercially licensable model based on Llama 13b and trained by Nomic AI on the latest curated GPT4All dataset." + }, + { + "md5sum": "756249d3d6abe23bde3b1ae272628640", + "filename": "ggml-mpt-7b-chat.bin", + "filesize": "4854401050", + "isDefault": "true", + "bestMPT": "true", + "requires": "2.4.1", + "description": "Current best non-commercially licensable chat model based on MPT and trained by Mosaic ML." + }, + { + "md5sum": "879344aaa9d62fdccbda0be7a09e7976", + "filename": "ggml-gpt4all-j-v1.2-jazzy.bin", + "filesize": "3785248281", + "description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v2 GPT4All dataset." + }, + { + "md5sum": "61d48a82cb188cceb14ebb8082bfec37", + "filename": "ggml-gpt4all-j-v1.1-breezy.bin", + "filesize": "3785248281", + "description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v1 GPT4All dataset." + }, + { + "md5sum": "5b5a3f9b858d33b29b52b89692415595", + "filename": "ggml-gpt4all-j.bin", + "filesize": "3785248281", + "description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v0 GPT4All dataset." + }, + { + "md5sum": "29119f8fa11712704c6b22ac5ab792ea", + "filename": "ggml-vicuna-7b-1.1-q4_2.bin", + "filesize": "4212859520", + "description": "A non-commercially licensable model based on Llama 7b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego." 
+ }, + { + "md5sum": "95999b7b0699e2070af63bf5d34101a8", + "filename": "ggml-vicuna-13b-1.1-q4_2.bin", + "filesize": "8136770688", + "description": "A non-commercially licensable model based on Llama 13b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego." + }, + { + "md5sum": "99e6d129745a3f1fb1121abed747b05a", + "filename": "ggml-wizardLM-7B.q4_2.bin", + "filesize": "4212864640", + "description": "A non-commercially licensable model based on Llama 7b and trained by Microsoft and Peking University." + }, + { + "md5sum": "6cb4ee297537c9133bddab9692879de0", + "filename": "ggml-stable-vicuna-13B.q4_2.bin", + "filesize": "8136777088", + "description": "A non-commercially licensable model based on Llama 13b and RLHF trained by Stable AI." + }, + { + "md5sum": "120c32a51d020066288df045ef5d52b9", + "filename": "ggml-mpt-7b-base.bin", + "filesize": "4854401028", + "requires": "2.4.1", + "description": "A commercially licensable model base pre-trained by Mosaic ML." + }, + { + "md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe", + "filename": "ggml-nous-gpt4-vicuna-13b.bin", + "filesize": "8136777088", + "description": "A non-commercially licensable model based on Vicuna 13b, fine-tuned on ~180,000 instructions, trained by Nous Research." + }, + { + "md5sum": "1cfa4958f489f0a0d1ffdf6b37322809", + "filename": "ggml-mpt-7b-instruct.bin", + "filesize": "4854401028", + "requires": "2.4.1", + "description": "A commercially licensable instruct model based on MPT and trained by Mosaic ML." + } + ] \ No newline at end of file