Working Open AI version

28 changed files

app.py
@@ -119,6 +119,9 @@ class Gpt4AllWebUI(GPT4AllAPI):
         self.add_endpoint("/", "", self.index, methods=["GET"])
         self.add_endpoint("/<path:filename>", "serve_static", self.serve_static, methods=["GET"])
+        self.add_endpoint("/images/<path:filename>", "serve_images", self.serve_images, methods=["GET"])
+        self.add_endpoint("/bindings/<path:filename>", "serve_bindings", self.serve_bindings, methods=["GET"])
         self.add_endpoint("/personalities/<path:filename>", "serve_personalities", self.serve_personalities, methods=["GET"])
         self.add_endpoint("/outputs/<path:filename>", "serve_outputs", self.serve_outputs, methods=["GET"])
@@ -452,7 +455,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
                 self.binding = self.process.load_binding(self.config["binding"], install=True)

             except Exception as ex:
-                print("Couldn't build binding")
+                print(f"Couldn't build binding: [{ex}]")
                 return jsonify({'setting_name': data['setting_name'], "status":False, 'error':str(ex)})
         else:
             if self.config["debug"]:
@@ -508,6 +511,10 @@ class Gpt4AllWebUI(GPT4AllAPI):
             try:
                 bnd = load_config(card)
                 bnd["folder"]=f.stem
+                icon_path = Path(f/"logo.png")
+                if icon_path.exists():
+                    bnd["icon"]=str(icon_path)
+
                 bindings.append(bnd)
             except Exception as ex:
                 print(f"Couldn't load backend card : {f}\n\t{ex}")
@@ -610,6 +617,21 @@ class Gpt4AllWebUI(GPT4AllAPI):
         fn = filename.split("/")[-1]
         return send_from_directory(path, fn)

+    def serve_images(self, filename):
+        root_dir = os.getcwd()
+        path = os.path.join(root_dir, 'images/')+"/".join(filename.split("/")[:-1])
+
+        fn = filename.split("/")[-1]
+        return send_from_directory(path, fn)
+
+    def serve_bindings(self, filename):
+        root_dir = os.getcwd()
+        path = os.path.join(root_dir, 'bindings/')+"/".join(filename.split("/")[:-1])
+
+        fn = filename.split("/")[-1]
+        return send_from_directory(path, fn)
+
     def serve_personalities(self, filename):
         root_dir = os.getcwd()
         path = os.path.join(root_dir, 'personalities/')+"/".join(filename.split("/")[:-1])
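Note: the two new routes follow the same pattern as the existing serve_static handler: split the requested path, resolve it under a fixed root, and hand the file to Flask. A minimal standalone sketch of that pattern, assuming only Flask (the real code registers methods on Gpt4AllWebUI through add_endpoint):

```python
# Minimal sketch of the static-serving pattern behind serve_images /
# serve_bindings; paths mirror the hunk above.
import os
from flask import Flask, send_from_directory

app = Flask(__name__)

def serve_from(root: str, filename: str):
    # Everything before the last "/" selects the subfolder under `root`;
    # the last component is the file itself.
    path = os.path.join(os.getcwd(), root) + "/".join(filename.split("/")[:-1])
    fn = filename.split("/")[-1]
    return send_from_directory(path, fn)

@app.route("/images/<path:filename>")
def serve_images(filename):
    return serve_from("images/", filename)

@app.route("/bindings/<path:filename>")
def serve_bindings(filename):
    return serve_from("bindings/", filename)
```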
@@ -838,12 +860,13 @@ class Gpt4AllWebUI(GPT4AllAPI):
                 owner_link = model.get("owner_link", 'https://github.com/ParisNeo')
                 filesize = int(model.get('filesize',0))
                 description = model.get('description',"")
+                model_type = model.get("model_type","")
                 if server.endswith("/"):
                     path = f'{server}{filename}'
                 else:
                     path = f'{server}/{filename}'
                 local_path = Path(f'./models/{self.config["binding"]}/{filename}')
-                is_installed = local_path.exists()
+                is_installed = local_path.exists() or model_type.lower()=="api"
                 models.append({
                     'title': filename,
                     'icon': image_url, # Replace with the path to the model icon
@@ -854,6 +877,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
                     'isInstalled': is_installed,
                     'path': path,
                     'filesize': filesize,
+                    'model_type': model_type
                 })
             except Exception as ex:
                 print("#################################")
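Note: with the new model_type field, API-hosted models report as installed even though nothing exists under ./models/<binding>. A condensed sketch of the check introduced above (the card values are illustrative, not from the repo):

```python
from pathlib import Path

# Hypothetical card values for illustration; real ones come from models.yaml.
model = {"filename": "gpt-3.5-turbo", "model_type": "api"}
binding_folder = "open_ai"

local_path = Path(f'./models/{binding_folder}/{model["filename"]}')
# An "api" model needs no local weights, so it counts as installed.
is_installed = local_path.exists() or model.get("model_type", "").lower() == "api"
```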
@@ -57,7 +57,7 @@ class CustomBinding(LLMBinding):
         Returns:
             list: A list of tokens representing the tokenized prompt.
         """
-        return self.model.tokenize(prompt.encode())
+        return None

     def detokenize(self, tokens_list):
         """
@@ -69,7 +69,7 @@ class CustomBinding(LLMBinding):
         Returns:
             str: The detokenized text as a string.
         """
-        return self.model.detokenize(tokens_list)
+        return None

     def generate(self,
                  prompt:str,
@@ -87,17 +87,22 @@ class CustomBinding(LLMBinding):
         """
         try:
             output = ""
-            self.model.reset()
-            tokens = self.model.tokenize(prompt)
             count = 0
             generated_text = """
             This is an empty binding that shows how you can build your own binding.
-            Find it in bindings
+            Find it in bindings.
+            ```python
+            # This is a python snippet
+            print("Hello World")
+            ```
+
+            This is a photo
+            ![](/images/icon.png)
             """
-            for tok in re.split(r' |\n', generated_text):
-                if count >= n_predict or self.model.is_eos_token(tok):
+            for tok in re.split(r'(\s+)', generated_text):
+                if count >= n_predict:
                     break
-                word = self.model.detokenize(tok)
+                word = tok
                 if new_text_callback is not None:
                     if not new_text_callback(word):
                         break
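Note: the template now streams its canned text through new_text_callback, splitting with re.split(r'(\s+)') so the whitespace tokens are kept and the UI can progressively re-render the markdown, including the fenced snippet and the image. A self-contained sketch of that loop with a stand-in callback:

```python
import re

generated_text = "This is an empty binding that shows how you can build your own binding."
n_predict = 128
output = ""
count = 0

def new_text_callback(word: str) -> bool:
    # Stand-in for the UI callback: emit the chunk; return False to stop.
    print(word, end="")
    return True

# re.split(r'(\s+)', ...) keeps the separators, so joining the chunks
# reproduces the text exactly as written.
for tok in re.split(r'(\s+)', generated_text):
    if count >= n_predict:
        break
    if new_text_callback is not None and not new_text_callback(tok):
        break
    output += tok
    count += 1
```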
BIN  bindings/backend_template/logo.png  (new file, 237 KiB)
@@ -4,4 +4,5 @@
 owner_link: https://link_to_the_owner_web_page
 owner: Owner_name
 server: https://Path_to_the_server_to_load_from
 sha256: The Hash code of the file
+model_type: api # api or file
@@ -106,10 +106,10 @@ class CTRansformers(LLMBinding):
             count = 0
             for tok in self.model.generate(
                            tokens,
-                           top_k=self.config['top_k'],
-                           top_p=self.config['top_p'],
-                           temperature=self.config['temperature'],
-                           repetition_penalty=self.config['repeat_penalty'],
+                           top_k=gpt_params['top_k'],
+                           top_p=gpt_params['top_p'],
+                           temperature=gpt_params['temperature'],
+                           repetition_penalty=gpt_params['repeat_penalty'],
                            seed=self.config['seed'],
                            batch_size=1,
                            threads = self.config['n_threads'],
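Note: this and the following binding hunks all swap self.config[...] for gpt_params[...] so sampling settings can be overridden per request. The diff does not show how gpt_params is assembled; a plausible sketch (names hypothetical) is config defaults merged with per-call overrides:

```python
# Hypothetical merge; the actual construction of gpt_params lives in code
# outside this diff.
default_params = {
    "top_k": 50,
    "top_p": 0.95,
    "temperature": 0.7,
    "repeat_penalty": 1.3,
}

def build_gpt_params(config: dict, **overrides) -> dict:
    merged = {k: config.get(k, v) for k, v in default_params.items()}
    merged.update(overrides)          # per-request values win
    return merged

params = build_gpt_params({"temperature": 0.9}, top_k=40)
# -> {'top_k': 40, 'top_p': 0.95, 'temperature': 0.9, 'repeat_penalty': 1.3}
```

Under this reading, self.config keeps binding-level settings (seed, threads, repeat_last_n), which matches the lines that stayed on self.config in these hunks, while everything the user tweaks per message rides in gpt_params.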
BIN  bindings/c_transformers/logo.png  (new file, 268 KiB)
bindings/config_local.yaml  (new file)
@@ -0,0 +1 @@
+pdf_file_path: ''
@@ -96,10 +96,10 @@ class GPT4ALL(LLMBinding):
             self.model._response_callback = local_callback
             self.model.generate(prompt,
                                 n_predict=n_predict,
-                                temp=self.config['temperature'],
-                                top_k=self.config['top_k'],
-                                top_p=self.config['top_p'],
-                                repeat_penalty=self.config['repeat_penalty'],
+                                temp=gpt_params["temp"],
+                                top_k=gpt_params['top_k'],
+                                top_p=gpt_params['top_p'],
+                                repeat_penalty=gpt_params['repeat_penalty'],
                                 repeat_last_n = self.config['repeat_last_n'],
                                 # n_threads=self.config['n_threads'],
                                 streaming=False,
BIN  bindings/gpt_4all/logo.png  (new file, 22 KiB)
@@ -80,10 +80,10 @@ class GptJ(LLMBinding):
             output = ""
             for tok in self.model.generate(prompt,
                                            n_predict=n_predict,
-                                           temp=self.config['temperature'],
-                                           top_k=self.config['top_k'],
-                                           top_p=self.config['top_p'],
-                                           #repeat_penalty=self.config['repeat_penalty'],
+                                           temp=gpt_params["temp"],
+                                           top_k=gpt_params['top_k'],
+                                           top_p=gpt_params['top_p'],
+                                           #repeat_penalty=gpt_params['repeat_penalty'],
                                            #repeat_last_n = self.config['repeat_last_n'],
                                            n_threads=self.config['n_threads'],
                                            ):
@@ -83,10 +83,10 @@ class GPTJ(LLMBinding):
                 seed=self.config['seed'],
                 n_threads=self.config['n_threads'],
                 n_predict=n_predict,
-                top_k=self.config['top_k'],
-                top_p=self.config['top_p'],
-                temp=self.config['temperature'],
-                repeat_penalty=self.config['repeat_penalty'],
+                top_k=gpt_params['top_k'],
+                top_p=gpt_params['top_p'],
+                temp=gpt_params["temp"],
+                repeat_penalty=gpt_params['repeat_penalty'],
                 repeat_last_n=self.config['repeat_last_n'],
                 n_batch=8,
                 reset=True,
BIN  bindings/gptq/logo.png  (new file, 357 KiB)
@@ -87,10 +87,10 @@ class LLAMACPP(LLMBinding):
             tokens = self.model.tokenize(prompt.encode())
             count = 0
             for tok in self.model.generate(tokens,
-                                           temp=self.config['temperature'],
-                                           top_k=self.config['top_k'],
-                                           top_p=self.config['top_p'],
-                                           repeat_penalty=self.config['repeat_penalty'],
+                                           temp=gpt_params["temp"],
+                                           top_k=gpt_params['top_k'],
+                                           top_p=gpt_params['top_p'],
+                                           repeat_penalty=gpt_params['repeat_penalty'],
                                            ):
                 if count >= n_predict or (tok == self.model.token_eos()):
                     break
BIN  bindings/llama_cpp_official/logo.png  (new file, 164 KiB)
@@ -42,8 +42,10 @@ class OpenAIGPT(LLMBinding):
         # The local config can be used to store personal information that shouldn't be shared, like the ChatGPT key
         # or other personal information
         # This file is never committed to the repository as it is ignored by .gitignore
         self.config = config
         self._local_config_file_path = Path(__file__).parent/"config_local.yaml"
-        self.config = load_config(self._local_config_file_path)
+        self.local_config = load_config(self._local_config_file_path)
+        openai.api_key = self.local_config["openai_key"]

         # Do your initialization stuff
@@ -57,7 +59,7 @@ class OpenAIGPT(LLMBinding):
         Returns:
             list: A list of tokens representing the tokenized prompt.
         """
-        return self.model.tokenize(prompt.encode())
+        return None

     def detokenize(self, tokens_list):
         """
@@ -69,7 +71,7 @@ class OpenAIGPT(LLMBinding):
         Returns:
             str: The detokenized text as a string.
         """
-        return self.model.detokenize(tokens_list)
+        return None

     def generate(self,
                  prompt:str,
@@ -86,35 +88,21 @@ class OpenAIGPT(LLMBinding):
             verbose (bool, optional): If true, the code will print detailed information about the generation process. Defaults to False.
         """
         try:
             output = ""
-            self.model.reset()
-            tokens = self.model.tokenize(prompt)
-            count = 0
-            generated_text = """
-            This is an empty binding that shows how you can build your own binding.
-            Find it in bindings
-            """

             response = openai.Completion.create(
-                engine='text-davinci-003', # Choose the engine according to your OpenAI plan
+                engine=self.config["model"], # Choose the engine according to your OpenAI plan
                 prompt=prompt,
-                max_tokens=100, # Adjust the desired length of the generated response
+                max_tokens=n_predict, # Adjust the desired length of the generated response
                 n=1, # Specify the number of responses you want
                 stop=None, # Define a stop sequence if needed
-                temperature=0.7 # Adjust the temperature for more or less randomness in the output
+                temperature=gpt_params["temp"] # Adjust the temperature for more or less randomness in the output
             )
-            for tok in re.split(r' |\n', generated_text):
-                if count >= n_predict or self.model.is_eos_token(tok):
-                    break
-                word = self.model.detokenize(tok)
-                if new_text_callback is not None:
-                    if not new_text_callback(word):
-                        break
-                output += word
-                count += 1

             # Extract the generated reply from the API response
             reply = response.choices[0].text.strip()
             return reply
         except Exception as ex:
             print(ex)
-            return output
+            return ""

     @staticmethod
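Note: generate() is now a single completion call: engine from the selected model, max_tokens from n_predict, temperature from gpt_params. A trimmed sketch of the same flow against the pre-1.0 openai package (the key-file layout follows the install step below; the engine name is one of the entries in models.yaml):

```python
# Sketch of the new generate() path, assuming the legacy (pre-1.0) `openai`
# package and a config_local.yaml holding the key.
import openai
import yaml

with open("config_local.yaml") as f:
    local_config = yaml.safe_load(f)     # e.g. {"openai_key": "sk-..."}
openai.api_key = local_config["openai_key"]

response = openai.Completion.create(
    engine="text-davinci-003",  # self.config["model"] in the binding
    prompt="Hello",
    max_tokens=100,             # n_predict in the binding
    n=1,
    stop=None,
    temperature=0.7,            # gpt_params["temp"] in the binding
)
reply = response.choices[0].text.strip()
```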
@@ -3,7 +3,7 @@ from pathlib import Path
 import requests
 from tqdm import tqdm
 from api.config import save_config
-
+import yaml
 class Install:
     def __init__(self, api):
         # Get the current directory
@@ -11,7 +11,7 @@ class Install:
         install_file = current_dir / ".installed"

         if not install_file.exists():
-            print("-------------- Template binding -------------------------------")
+            print("-------------- OpenAI Binding -------------------------------")
             print("This is the first time you are using this binding.")
             print("Installing ...")
             # Step 2: Install dependencies using pip from requirements.txt
@@ -24,14 +24,12 @@ class Install:

             #Create
             self._local_config_file_path = Path(__file__).parent/"config_local.yaml"
-            if not self._local_config_file_path.exists:
+            if not self._local_config_file_path.exists():
                 key = input("Please enter your Open AI Key")
                 config={
                     "openai_key":key
                 }
                 self.config = save_config(config, self._local_config_file_path)
-
-
             #Create the install file (a file that is used to ensure the installation was done correctly)
             with open(install_file,"w") as f:
                 f.write("ok")
@@ -41,4 +39,24 @@ class Install:
        """Installs pytorch with cuda (if you have a gpu)
        """
        subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])

+
+    def create_config_file(self):
+        """
+        Create a config_local.yaml file with predefined data.
+
+        The function creates a config_local.yaml file with the specified data. The file is saved in the parent directory
+        of the current file.
+
+        Args:
+            None
+
+        Returns:
+            None
+        """
+        data = {
+            "pdf_file_path": "" # Path to the PDF that will be discussed
+        }
+        path = Path(__file__).parent.parent / 'config_local.yaml'
+        with open(path, 'w') as file:
+            yaml.dump(data, file)
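Note: the installer's pattern is a hidden .installed marker that gates first-run work, plus a git-ignored config_local.yaml that stores the key (the missing parentheses on .exists made the old check always truthy, fixed above). A condensed, standalone sketch of that flow, with file locations assumed relative to the binding folder:

```python
from pathlib import Path
import yaml

current_dir = Path(__file__).parent
install_file = current_dir / ".installed"
local_config_path = current_dir / "config_local.yaml"

if not install_file.exists():
    # First run: collect the key once and persist it outside git.
    if not local_config_path.exists():   # note the call parentheses
        key = input("Please enter your Open AI Key: ")
        with open(local_config_path, "w") as f:
            yaml.dump({"openai_key": key}, f)
    # The marker file records that installation completed.
    install_file.write_text("ok")
```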
BIN  bindings/open_ai/logo.png  (new file, 6.1 KiB)
@@ -1,8 +1,39 @@
-- description: ChatGPT model
-  icon: https://www.google.fr/url?sa=i&url=https%3A%2F%2Fcommons.wikimedia.org%2Fwiki%2FFile%3AChatGPT_logo.svg&psig=AOvVaw1rUG9Bl0WfHOYRJF7LgSmA&ust=1685107628710000&source=images&cd=vfe&ved=0CBEQjRxqFwoTCJDr0J7JkP8CFQAAAAAdAAAAABAE
-  filename: ChatGpt by Open AI
+- description: Most advanced language model by OpenAI based on GPT-3 architecture, offering powerful language generation capabilities.
+  icon: /bindings/open_ai/logo.png
+  filename: gpt-3.5-turbo
   license: Commercial
   owner_link: https://link_to_the_owner_web_page
   owner: Open AI
   server: https://openai.com
-  sha256: NONE
+  sha256: ~
+  model_type: api
+
+- description: Highly capable language model for generating high-quality text and performing various natural language processing tasks.
+  icon: /bindings/open_ai/logo.png
+  filename: text-davinci-003
+  license: Commercial
+  owner_link: https://link_to_the_owner_web_page
+  owner: Open AI
+  server: https://openai.com
+  sha256: ~
+  model_type: api
+
+- description: Earlier version of the text-davinci model, offering similar functionality with potentially slightly different performance characteristics.
+  icon: /bindings/open_ai/logo.png
+  filename: text-davinci-002
+  license: Commercial
+  owner_link: https://link_to_the_owner_web_page
+  owner: Open AI
+  server: https://openai.com
+  sha256: ~
+  model_type: api
+
+- description: Original version of the text-davinci model, providing strong language generation capabilities.
+  icon: /bindings/open_ai/logo.png
+  filename: text-davinci-001
+  license: Commercial
+  owner_link: https://link_to_the_owner_web_page
+  owner: Open AI
+  server: https://openai.com
+  sha256: ~
+  model_type: api
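Note: app.py turns each entry of this zoo file into a model card; sha256: ~ parses as YAML null, which suits API models that have no file to hash. A small sketch of consuming the file (the path is assumed from the repo layout):

```python
import yaml

with open("bindings/open_ai/models.yaml") as f:
    models = yaml.safe_load(f)

for model in models:
    # sha256: ~  ->  None in Python; API models carry no downloadable file.
    assert model["sha256"] is None
    print(model["filename"], model["model_type"])
```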
@@ -1 +1,2 @@
 openai
+tiktoken
@@ -83,10 +83,10 @@ class PyLLAMACPP(LLMBinding):
             output = ""
             for tok in self.model.generate(prompt,
                                            n_predict=n_predict,
-                                           temp=self.config['temperature'],
-                                           top_k=self.config['top_k'],
-                                           top_p=self.config['top_p'],
-                                           repeat_penalty=self.config['repeat_penalty'],
+                                           temp=gpt_params['temperature'],
+                                           top_k=gpt_params['top_k'],
+                                           top_p=gpt_params['top_p'],
+                                           repeat_penalty=gpt_params['repeat_penalty'],
                                            repeat_last_n = self.config['repeat_last_n'],
                                            n_threads=self.config['n_threads'],
                                            ):
BIN  bindings/py_llama_cpp/logo.png  (new file, 237 KiB)
@@ -4,4 +4,4 @@ In this short video we will look at bindings and models, how to select them and

 First, to select a binding, go to the settings tab then Bindings zoo.

-You will find a certain numbre of
+You will find a certain number of bindings. Each binding was built by a person or a team. Make sure to visit their
BIN  images/icon.png  (new file, 37 KiB)
@@ -31,6 +31,11 @@
             <b>Version: </b>
             {{ binding.version }}
           </div>
+          <a :href="binding.link" target="_blank" class="flex items-center">
+            <i data-feather="github" class="w-5 m-1"></i>
+            <b>Link: </b>
+            {{ binding.link }}
+          </a>
         </div>
         <div class="flex items-center">
           <i data-feather="info" class="w-5 m-1"></i>
@@ -61,7 +61,7 @@
       <p class="mx-1 opacity-80">{{ description }}</p>
     </div>
     <div class="flex-shrink-0" >
-      <button class="px-4 py-2 rounded-md text-white font-bold transition-colors duration-300"
+      <button v-if="model_type !== 'api'" class="px-4 py-2 rounded-md text-white font-bold transition-colors duration-300"
         :class="[isInstalled ? 'bg-red-500 hover:bg-red-600' : linkNotValid ? 'bg-gray-500 hover:bg-gray-600' : 'bg-green-500 hover:bg-green-600']"
         :disabled="installing || uninstalling" @click.stop="toggleInstall">
         <template v-if="installing">
@@ -84,6 +84,7 @@
         {{ isInstalled ? model.isCustomModel ? 'Delete' : 'Uninstall' : linkNotValid ? 'Link is not valid':'Install' }}
         </template>
       </button>
+
     </div>

   </div>
@@ -110,7 +111,8 @@ export default {
     onUninstall: Function,
     onSelected: Function,
     selected: Boolean,
-    model: Object
+    model: Object,
+    model_type: String
   },
   data() {
     return {
@@ -136,54 +138,57 @@ export default {
       return filesize(size)
     },
     async getFileSize(url) {
-      try {
-        const res = await axios.head(url)
-        //console.log("addddd",url, res.headers)
-        if (res) {
-          if (res.headers["content-length"]) {
-            return this.computedFileSize(res.headers["content-length"])
-          }
-          if (this.model.filesize) {
-            return this.computedFileSize(this.model.filesize)
-          }
-          return 'Could not be determined'
-        }
-        if (this.model.filesize) {
-          return this.computedFileSize(this.model.filesize)
-        }
-        return 'Could not be determined'
-      } catch (error) {
-        console.log(error.message,'getFileSize')
-        this.linkNotValid=true
-        return 'Could not be determined'
-      }
+      console.log(this.model_type);
+      if(this.model_type!="api"){
+        try {
+          const res = await axios.head(url)
+          //console.log("addddd",url, res.headers)
+          if (res) {
+            if (res.headers["content-length"]) {
+              return this.computedFileSize(res.headers["content-length"])
+            }
+            if (this.model.filesize) {
+              return this.computedFileSize(this.model.filesize)
+            }
+            return 'Could not be determined'
+          }
+          if (this.model.filesize) {
+            return this.computedFileSize(this.model.filesize)
+          }
+          return 'Could not be determined'
+          // Example response
+          // {
+          //   date: 'Tue, 03 Apr 2018 14:29:32 GMT',
+          //   'content-type': 'application/javascript; charset=utf-8',
+          //   'content-length': '9068',
+          //   connection: 'close',
+          //   'last-modified': 'Wed, 28 Feb 2018 04:16:30 GMT',
+          //   etag: '"5a962d1e-236c"',
+          //   expires: 'Sun, 24 Mar 2019 14:29:32 GMT',
+          //   'cache-control': 'public, max-age=30672000',
+          //   'access-control-allow-origin': '*',
+          //   'cf-cache-status': 'HIT',
+          //   'accept-ranges': 'bytes',
+          //   'strict-transport-security': 'max-age=15780000; includeSubDomains',
+          //   'expect-ct': 'max-age=604800, report-uri="https://report-uri.cloudflare.com/cdn-cgi/beacon/expect-ct"',
+          //   server: 'cloudflare',
+          //   'cf-ray': '405c3a5cba7a68ba-CDG'
+          // }
+        } catch (error) {
+          console.log(error.message,'getFileSize')
+          this.linkNotValid=true
+          return 'Could not be determined'
+        }
+      }
     },
     getImgUrl() {
@@ -179,7 +179,7 @@
               :title="model.title" :icon="model.icon" :path="model.path" :owner="model.owner"
               :owner_link="model.owner_link" :license="model.license" :description="model.description"
               :is-installed="model.isInstalled" :on-install="onInstall" :on-uninstall="onUninstall"
-              :on-selected="onSelected" :selected="model.title === configFile.model" :model="model" />
+              :on-selected="onSelected" :selected="model.title === configFile.model" :model="model" :model_type="model.model_type" />
           </TransitionGroup>
         </div>
       </div>