diff --git a/app.py b/app.py
index 2dc77410..2da757d0 100644
--- a/app.py
+++ b/app.py
@@ -119,6 +119,9 @@ class Gpt4AllWebUI(GPT4AllAPI):
         self.add_endpoint("/", "", self.index, methods=["GET"])
         self.add_endpoint("/<path:filename>", "serve_static", self.serve_static, methods=["GET"])
+
+        self.add_endpoint("/images/<path:filename>", "serve_images", self.serve_images, methods=["GET"])
+        self.add_endpoint("/bindings/<path:filename>", "serve_bindings", self.serve_bindings, methods=["GET"])
         self.add_endpoint("/personalities/<path:filename>", "serve_personalities", self.serve_personalities, methods=["GET"])
         self.add_endpoint("/outputs/<path:filename>", "serve_outputs", self.serve_outputs, methods=["GET"])
@@ -452,7 +455,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
                 self.binding = self.process.load_binding(self.config["binding"], install=True)
 
             except Exception as ex:
-                print("Couldn't build binding")
+                print(f"Couldn't build binding: [{ex}]")
                 return jsonify({'setting_name': data['setting_name'], "status":False, 'error':str(ex)})
         else:
             if self.config["debug"]:
@@ -508,6 +511,10 @@ class Gpt4AllWebUI(GPT4AllAPI):
             try:
                 bnd = load_config(card)
                 bnd["folder"]=f.stem
+                icon_path = Path(f/"logo.png")
+                if icon_path.exists():
+                    bnd["icon"]=str(icon_path)
+
                 bindings.append(bnd)
             except Exception as ex:
                 print(f"Couldn't load backend card : {f}\n\t{ex}")
@@ -610,6 +617,21 @@ class Gpt4AllWebUI(GPT4AllAPI):
         fn = filename.split("/")[-1]
         return send_from_directory(path, fn)
 
+
+    def serve_images(self, filename):
+        root_dir = os.getcwd()
+        path = os.path.join(root_dir, 'images/')+"/".join(filename.split("/")[:-1])
+
+        fn = filename.split("/")[-1]
+        return send_from_directory(path, fn)
+
+    def serve_bindings(self, filename):
+        root_dir = os.getcwd()
+        path = os.path.join(root_dir, 'bindings/')+"/".join(filename.split("/")[:-1])
+
+        fn = filename.split("/")[-1]
+        return send_from_directory(path, fn)
+
     def serve_personalities(self, filename):
         root_dir = os.getcwd()
         path = os.path.join(root_dir, 'personalities/')+"/".join(filename.split("/")[:-1])
@@ -838,12 +860,13 @@ class Gpt4AllWebUI(GPT4AllAPI):
                     owner_link = model.get("owner_link", 'https://github.com/ParisNeo')
                     filesize = int(model.get('filesize',0))
                     description = model.get('description',"")
+                    model_type = model.get("model_type","")
                     if server.endswith("/"):
                         path = f'{server}{filename}'
                     else:
                         path = f'{server}/{filename}'
                     local_path = Path(f'./models/{self.config["binding"]}/{filename}')
-                    is_installed = local_path.exists()
+                    is_installed = local_path.exists() or model_type.lower()=="api"
                     models.append({
                         'title': filename,
                         'icon': image_url, # Replace with the path to the model icon
@@ -854,6 +877,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
                         'isInstalled': is_installed,
                         'path': path,
                         'filesize': filesize,
+                        'model_type': model_type
                     })
                 except Exception as ex:
                     print("#################################")
diff --git a/bindings/backend_template/__init__.py b/bindings/backend_template/__init__.py
index 2da1074f..a4a5d7d1 100644
--- a/bindings/backend_template/__init__.py
+++ b/bindings/backend_template/__init__.py
@@ -57,7 +57,7 @@ class CustomBinding(LLMBinding):
         Returns:
             list: A list of tokens representing the tokenized prompt.
         """
-        return self.model.tokenize(prompt.encode())
+        return None
 
     def detokenize(self, tokens_list):
         """
@@ -69,7 +69,7 @@ class CustomBinding(LLMBinding):
         Returns:
             str: The detokenized text as a string.
""" - return self.model.detokenize(tokens_list) + return None def generate(self, prompt:str, @@ -87,17 +87,22 @@ class CustomBinding(LLMBinding): """ try: output = "" - self.model.reset() - tokens = self.model.tokenize(prompt) count = 0 generated_text = """ This is an empty binding that shows how you can build your own binding. -Find it in bindings +Find it in bindings. +```python +# This is a python snippet +print("Hello World") +``` + +This is a photo +![](/images/icon.png) """ - for tok in re.split(r' |\n', generated_text): - if count >= n_predict or self.model.is_eos_token(tok): + for tok in re.split(r'(\s+)', generated_text): + if count >= n_predict: break - word = self.model.detokenize(tok) + word = tok if new_text_callback is not None: if not new_text_callback(word): break diff --git a/bindings/backend_template/logo.png b/bindings/backend_template/logo.png new file mode 100644 index 00000000..ce863618 Binary files /dev/null and b/bindings/backend_template/logo.png differ diff --git a/bindings/backend_template/models.yaml b/bindings/backend_template/models.yaml index 83c4a9d9..99b84601 100644 --- a/bindings/backend_template/models.yaml +++ b/bindings/backend_template/models.yaml @@ -4,4 +4,5 @@ owner_link: https://link_to_the_owner_web_page owner: Owner_name server: https://Path_to_the_server_to_load_from - sha256: The Hash code of the file \ No newline at end of file + sha256: The Hash code of the file + model_type: api # api or file \ No newline at end of file diff --git a/bindings/c_transformers/__init__.py b/bindings/c_transformers/__init__.py index d38e68c3..75c6c25e 100644 --- a/bindings/c_transformers/__init__.py +++ b/bindings/c_transformers/__init__.py @@ -106,10 +106,10 @@ class CTRansformers(LLMBinding): count = 0 for tok in self.model.generate( tokens, - top_k=self.config['top_k'], - top_p=self.config['top_p'], - temperature=self.config['temperature'], - repetition_penalty=self.config['repeat_penalty'], + top_k=gpt_params['top_k'], + top_p=gpt_params['top_p'], + temperature=gpt_params['temperature'], + repetition_penalty=gpt_params['repeat_penalty'], seed=self.config['seed'], batch_size=1, threads = self.config['n_threads'], diff --git a/bindings/c_transformers/logo.png b/bindings/c_transformers/logo.png new file mode 100644 index 00000000..4a6497b4 Binary files /dev/null and b/bindings/c_transformers/logo.png differ diff --git a/bindings/config_local.yaml b/bindings/config_local.yaml new file mode 100644 index 00000000..70793fdd --- /dev/null +++ b/bindings/config_local.yaml @@ -0,0 +1 @@ +pdf_file_path: '' diff --git a/bindings/gpt_4all/__init__.py b/bindings/gpt_4all/__init__.py index edb3d485..b71c8a56 100644 --- a/bindings/gpt_4all/__init__.py +++ b/bindings/gpt_4all/__init__.py @@ -96,10 +96,10 @@ class GPT4ALL(LLMBinding): self.model._response_callback = local_callback self.model.generate(prompt, n_predict=n_predict, - temp=self.config['temperature'], - top_k=self.config['top_k'], - top_p=self.config['top_p'], - repeat_penalty=self.config['repeat_penalty'], + temp=gpt_params["temp"], + top_k=gpt_params['top_k'], + top_p=gpt_params['top_p'], + repeat_penalty=gpt_params['repeat_penalty'], repeat_last_n = self.config['repeat_last_n'], # n_threads=self.config['n_threads'], streaming=False, diff --git a/bindings/gpt_4all/logo.png b/bindings/gpt_4all/logo.png new file mode 100644 index 00000000..ac81fa6d Binary files /dev/null and b/bindings/gpt_4all/logo.png differ diff --git a/bindings/gpt_j_a/__init__.py b/bindings/gpt_j_a/__init__.py index 76be5840..1c34e33e 100644 
--- a/bindings/gpt_j_a/__init__.py
+++ b/bindings/gpt_j_a/__init__.py
@@ -80,10 +80,10 @@ class GptJ(LLMBinding):
             output = ""
             for tok in self.model.generate(prompt,
                                            n_predict=n_predict,
-                                           temp=self.config['temperature'],
-                                           top_k=self.config['top_k'],
-                                           top_p=self.config['top_p'],
-                                           #repeat_penalty=self.config['repeat_penalty'],
+                                           temp=gpt_params["temp"],
+                                           top_k=gpt_params['top_k'],
+                                           top_p=gpt_params['top_p'],
+                                           #repeat_penalty=gpt_params['repeat_penalty'],
                                            #repeat_last_n = self.config['repeat_last_n'],
                                            n_threads=self.config['n_threads'],
                                            ):
diff --git a/bindings/gpt_j_m/__init__.py b/bindings/gpt_j_m/__init__.py
index 71513a91..8f42a1da 100644
--- a/bindings/gpt_j_m/__init__.py
+++ b/bindings/gpt_j_m/__init__.py
@@ -83,10 +83,10 @@ class GPTJ(LLMBinding):
                                  seed=self.config['seed'],
                                  n_threads=self.config['n_threads'],
                                  n_predict=n_predict,
-                                 top_k=self.config['top_k'],
-                                 top_p=self.config['top_p'],
-                                 temp=self.config['temperature'],
-                                 repeat_penalty=self.config['repeat_penalty'],
+                                 top_k=gpt_params['top_k'],
+                                 top_p=gpt_params['top_p'],
+                                 temp=gpt_params["temp"],
+                                 repeat_penalty=gpt_params['repeat_penalty'],
                                  repeat_last_n=self.config['repeat_last_n'],
                                  n_batch=8,
                                  reset=True,
diff --git a/bindings/gptq/logo.png b/bindings/gptq/logo.png
new file mode 100644
index 00000000..11a285b4
Binary files /dev/null and b/bindings/gptq/logo.png differ
diff --git a/bindings/llama_cpp_official/__init__.py b/bindings/llama_cpp_official/__init__.py
index ad1c366f..010ce22b 100644
--- a/bindings/llama_cpp_official/__init__.py
+++ b/bindings/llama_cpp_official/__init__.py
@@ -87,10 +87,10 @@ class LLAMACPP(LLMBinding):
             tokens = self.model.tokenize(prompt.encode())
             count = 0
             for tok in self.model.generate(tokens,
-                                           temp=self.config['temperature'],
-                                           top_k=self.config['top_k'],
-                                           top_p=self.config['top_p'],
-                                           repeat_penalty=self.config['repeat_penalty'],
+                                           temp=gpt_params["temp"],
+                                           top_k=gpt_params['top_k'],
+                                           top_p=gpt_params['top_p'],
+                                           repeat_penalty=gpt_params['repeat_penalty'],
                                            ):
                 if count >= n_predict or (tok == self.model.token_eos()):
                     break
diff --git a/bindings/llama_cpp_official/logo.png b/bindings/llama_cpp_official/logo.png
new file mode 100644
index 00000000..51da30ed
Binary files /dev/null and b/bindings/llama_cpp_official/logo.png differ
diff --git a/bindings/open_ai/__init__.py b/bindings/open_ai/__init__.py
index f3284cd1..310058fc 100644
--- a/bindings/open_ai/__init__.py
+++ b/bindings/open_ai/__init__.py
@@ -42,8 +42,10 @@ class OpenAIGPT(LLMBinding):
         # The local config can be used to store personal information that shouldn't be shared like chatgpt Key
         # or other personal information
         # This file is never commited to the repository as it is ignored by .gitignore
+        self.config = config
         self._local_config_file_path = Path(__file__).parent/"config_local.yaml"
-        self.config = load_config(self._local_config_file_path)
+        self.local_config = load_config(self._local_config_file_path)
+        openai.api_key = self.local_config["openai_key"]
 
         # Do your initialization stuff
@@ -57,7 +59,7 @@ class OpenAIGPT(LLMBinding):
         Returns:
             list: A list of tokens representing the tokenized prompt.
         """
-        return self.model.tokenize(prompt.encode())
+        return None
 
     def detokenize(self, tokens_list):
         """
@@ -69,7 +71,7 @@ class OpenAIGPT(LLMBinding):
         Returns:
             str: The detokenized text as a string.
""" - return self.model.detokenize(tokens_list) + return None def generate(self, prompt:str, @@ -86,35 +88,21 @@ class OpenAIGPT(LLMBinding): verbose (bool, optional): If true, the code will spit many informations about the generation process. Defaults to False. """ try: - output = "" - self.model.reset() - tokens = self.model.tokenize(prompt) - count = 0 - generated_text = """ -This is an empty binding that shows how you can build your own binding. -Find it in bindings -""" - response = openai.Completion.create( - engine='text-davinci-003', # Choose the engine according to your OpenAI plan + engine=self.config["model"], # Choose the engine according to your OpenAI plan prompt=prompt, - max_tokens=100, # Adjust the desired length of the generated response + max_tokens=n_predict, # Adjust the desired length of the generated response n=1, # Specify the number of responses you want stop=None, # Define a stop sequence if needed - temperature=0.7 # Adjust the temperature for more or less randomness in the output + temperature=gpt_params["temp"] # Adjust the temperature for more or less randomness in the output ) - for tok in re.split(r' |\n', generated_text): - if count >= n_predict or self.model.is_eos_token(tok): - break - word = self.model.detokenize(tok) - if new_text_callback is not None: - if not new_text_callback(word): - break - output += word - count += 1 + + # Extract the generated reply from the API response + reply = response.choices[0].text.strip() + return reply except Exception as ex: print(ex) - return output + return "" @staticmethod diff --git a/bindings/open_ai/install.py b/bindings/open_ai/install.py index c0511fe3..7a75e3a6 100644 --- a/bindings/open_ai/install.py +++ b/bindings/open_ai/install.py @@ -3,7 +3,7 @@ from pathlib import Path import requests from tqdm import tqdm from api.config import save_config - +import yaml class Install: def __init__(self, api): # Get the current directory @@ -11,7 +11,7 @@ class Install: install_file = current_dir / ".installed" if not install_file.exists(): - print("-------------- Template binding -------------------------------") + print("-------------- OpenAI Binding -------------------------------") print("This is the first time you are using this binding.") print("Installing ...") # Step 2: Install dependencies using pip from requirements.txt @@ -24,14 +24,12 @@ class Install: #Create self._local_config_file_path = Path(__file__).parent/"config_local.yaml" - if not self._local_config_file_path.exists: + if not self._local_config_file_path.exists(): key = input("Please enter your Open AI Key") config={ "openai_key":key } self.config = save_config(config, self._local_config_file_path) - - #Create the install file (a file that is used to insure the installation was done correctly) with open(install_file,"w") as f: f.write("ok") @@ -41,4 +39,24 @@ class Install: """Installs pytorch with cuda (if you have a gpu) """ subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"]) - \ No newline at end of file + + + def create_config_file(self): + """ + Create a config_local.yaml file with predefined data. + + The function creates a config_local.yaml file with the specified data. The file is saved in the parent directory + of the current file. 
+
+        Args:
+            None
+
+        Returns:
+            None
+        """
+        data = {
+            "pdf_file_path": "" # Path to the PDF that will be discussed
+        }
+        path = Path(__file__).parent.parent / 'config_local.yaml'
+        with open(path, 'w') as file:
+            yaml.dump(data, file)
\ No newline at end of file
diff --git a/bindings/open_ai/logo.png b/bindings/open_ai/logo.png
new file mode 100644
index 00000000..28e8a173
Binary files /dev/null and b/bindings/open_ai/logo.png differ
diff --git a/bindings/open_ai/models.yaml b/bindings/open_ai/models.yaml
index 6b543881..c12c6421 100644
--- a/bindings/open_ai/models.yaml
+++ b/bindings/open_ai/models.yaml
@@ -1,8 +1,39 @@
-- description: ChatGPT model
-  icon: https://www.google.fr/url?sa=i&url=https%3A%2F%2Fcommons.wikimedia.org%2Fwiki%2FFile%3AChatGPT_logo.svg&psig=AOvVaw1rUG9Bl0WfHOYRJF7LgSmA&ust=1685107628710000&source=images&cd=vfe&ved=0CBEQjRxqFwoTCJDr0J7JkP8CFQAAAAAdAAAAABAE
-  filename: ChatGpt by Open AI
+- description: Most advanced language model by OpenAI based on GPT-3 architecture, offering powerful language generation capabilities.
+  icon: /bindings/open_ai/logo.png
+  filename: gpt-3.5-turbo
   license: Commercial
   owner_link: https://link_to_the_owner_web_page
   owner: Open AI
   server: https://openai.com
-  sha256: NONE
\ No newline at end of file
+  sha256: ~
+  model_type: api
+
+- description: Highly capable language model for generating high-quality text and performing various natural language processing tasks.
+  icon: /bindings/open_ai/logo.png
+  filename: text-davinci-003
+  license: Commercial
+  owner_link: https://link_to_the_owner_web_page
+  owner: Open AI
+  server: https://openai.com
+  sha256: ~
+  model_type: api
+
+- description: Earlier version of the text-davinci model, offering similar functionality with potentially slightly different performance characteristics.
+  icon: /bindings/open_ai/logo.png
+  filename: text-davinci-002
+  license: Commercial
+  owner_link: https://link_to_the_owner_web_page
+  owner: Open AI
+  server: https://openai.com
+  sha256: ~
+  model_type: api
+
+- description: Original version of the text-davinci model, providing strong language generation capabilities.
+  icon: /bindings/open_ai/logo.png
+  filename: text-davinci-001
+  license: Commercial
+  owner_link: https://link_to_the_owner_web_page
+  owner: Open AI
+  server: https://openai.com
+  sha256: ~
+  model_type: api
diff --git a/bindings/open_ai/requirements.txt b/bindings/open_ai/requirements.txt
index f0dd0aec..59537136 100644
--- a/bindings/open_ai/requirements.txt
+++ b/bindings/open_ai/requirements.txt
@@ -1 +1,2 @@
-openai
\ No newline at end of file
+openai
+tiktoken
\ No newline at end of file
diff --git a/bindings/py_llama_cpp/__init__.py b/bindings/py_llama_cpp/__init__.py
index 9184bafb..9518d6f0 100644
--- a/bindings/py_llama_cpp/__init__.py
+++ b/bindings/py_llama_cpp/__init__.py
@@ -83,10 +83,10 @@ class PyLLAMACPP(LLMBinding):
             output = ""
             for tok in self.model.generate(prompt,
                                            n_predict=n_predict,
-                                           temp=self.config['temperature'],
-                                           top_k=self.config['top_k'],
-                                           top_p=self.config['top_p'],
-                                           repeat_penalty=self.config['repeat_penalty'],
+                                           temp=gpt_params['temperature'],
+                                           top_k=gpt_params['top_k'],
+                                           top_p=gpt_params['top_p'],
+                                           repeat_penalty=gpt_params['repeat_penalty'],
                                            repeat_last_n = self.config['repeat_last_n'],
                                            n_threads=self.config['n_threads'],
                                            ):
diff --git a/bindings/py_llama_cpp/logo.png b/bindings/py_llama_cpp/logo.png
new file mode 100644
index 00000000..eba37834
Binary files /dev/null and b/bindings/py_llama_cpp/logo.png differ
diff --git a/docs/youtube/script_models.md b/docs/youtube/script_models.md
index 261ec4c4..6df1df06 100644
--- a/docs/youtube/script_models.md
+++ b/docs/youtube/script_models.md
@@ -4,4 +4,4 @@ In this short video we will look at bindings and models, how to select them and
 
 First, to select a binding, go to the settings tab then Bindings zoo.
 
-You will find a certain numbre of
\ No newline at end of file
+You will find a certain number of bindings. Each binding was built by a person or a team. Make sure to visit their
\ No newline at end of file
diff --git a/images/icon.png b/images/icon.png
new file mode 100644
index 00000000..6b2027a5
Binary files /dev/null and b/images/icon.png differ
diff --git a/web/src/components/BindingEntry.vue b/web/src/components/BindingEntry.vue
index ca9a7431..2d303537 100644
--- a/web/src/components/BindingEntry.vue
+++ b/web/src/components/BindingEntry.vue
@@ -31,6 +31,11 @@
 
                     Version:
                     {{ binding.version }}
 
+
+
+                    Link:
+                    {{ binding.link }}
+
 
diff --git a/web/src/components/ModelEntry.vue b/web/src/components/ModelEntry.vue
index cf519980..8c4404a4 100644
--- a/web/src/components/ModelEntry.vue
+++ b/web/src/components/ModelEntry.vue
@@ -61,7 +61,7 @@
 
                 {{ description }}
 
-
+
@@ -110,7 +111,8 @@ export default {
         onUninstall: Function,
         onSelected: Function,
         selected: Boolean,
-        model: Object
+        model: Object,
+        model_type: String
     },
     data() {
         return {
@@ -136,54 +138,57 @@ export default {
             return filesize(size)
         },
         async getFileSize(url) {
-            try {
+            console.log(this.model_type);
+            if(this.model_type!="api"){
+                try {
+                    const res = await axios.head(url)
+                    //console.log("addddd",url, res.headers)
+                    if (res) {
+
+                        if (res.headers["content-length"]) {
+                            return this.computedFileSize(res.headers["content-length"])
+                        }
+                        if (this.model.filesize) {
+                            return this.computedFileSize(this.model.filesize)
+                        }
+                        return 'Could not be determined'
-                const res = await axios.head(url)
-                //console.log("addddd",url, res.headers)
-                if (res) {
-
-                    if (res.headers["content-length"]) {
-                        return this.computedFileSize(res.headers["content-length"])
                     }
                     if (this.model.filesize) {
+
                         return this.computedFileSize(this.model.filesize)
                     }
                     return 'Could not be determined'
-                }
-                if (this.model.filesize) {
-
-                    return this.computedFileSize(this.model.filesize)
-                }
-                return 'Could not be determined'
-
-                // Example response
-                // {
-                // date: 'Tue, 03 Apr 2018 14:29:32 GMT',
-                // 'content-type': 'application/javascript; charset=utf-8',
-                // 'content-length': '9068',
-                // connection: 'close',
-                // 'last-modified': 'Wed, 28 Feb 2018 04:16:30 GMT',
-                // etag: '"5a962d1e-236c"',
-                // expires: 'Sun, 24 Mar 2019 14:29:32 GMT',
-                // 'cache-control': 'public, max-age=30672000',
-                // 'access-control-allow-origin': '*',
-                // 'cf-cache-status': 'HIT',
-                // 'accept-ranges': 'bytes',
-                // 'strict-transport-security': 'max-age=15780000; includeSubDomains',
-                // 'expect-ct': 'max-age=604800, report-uri="https://report-uri.cloudflare.com/cdn-cgi/beacon/expect-ct"',
-                // server: 'cloudflare',
-                // 'cf-ray': '405c3a5cba7a68ba-CDG'
-                // }
+                    // Example response
+                    // {
+                    // date: 'Tue, 03 Apr 2018 14:29:32 GMT',
+                    // 'content-type': 'application/javascript; charset=utf-8',
+                    // 'content-length': '9068',
+                    // connection: 'close',
+                    // 'last-modified': 'Wed, 28 Feb 2018 04:16:30 GMT',
+                    // etag: '"5a962d1e-236c"',
+                    // expires: 'Sun, 24 Mar 2019 14:29:32 GMT',
+                    // 'cache-control': 'public, max-age=30672000',
+                    // 'access-control-allow-origin': '*',
+                    // 'cf-cache-status': 'HIT',
+                    // 'accept-ranges': 'bytes',
+                    // 'strict-transport-security': 'max-age=15780000; includeSubDomains',
+                    // 'expect-ct': 'max-age=604800, report-uri="https://report-uri.cloudflare.com/cdn-cgi/beacon/expect-ct"',
+                    // server: 'cloudflare',
+                    // 'cf-ray': '405c3a5cba7a68ba-CDG'
+                    // }
 
-            } catch (error) {
-                console.log(error.message,'getFileSize')
-                this.linkNotValid=true
-                return 'Could not be determined'
-
+                } catch (error) {
+                    console.log(error.message,'getFileSize')
+                    this.linkNotValid=true
+                    return 'Could not be determined'
+
+                }
             }
+
         },
         getImgUrl() {
diff --git a/web/src/views/SettingsView.vue b/web/src/views/SettingsView.vue
index d5361e2b..57f26f69 100644
--- a/web/src/views/SettingsView.vue
+++ b/web/src/views/SettingsView.vue
@@ -179,7 +179,7 @@
                             :title="model.title" :icon="model.icon" :path="model.path" :owner="model.owner"
                             :owner_link="model.owner_link" :license="model.license" :description="model.description"
                             :is-installed="model.isInstalled" :on-install="onInstall" :on-uninstall="onUninstall"
-                            :on-selected="onSelected" :selected="model.title === configFile.model" :model="model" />
+                            :on-selected="onSelected" :selected="model.title === configFile.model" :model="model" :model_type="model.model_type" />