Working Open AI version
app.py
@@ -119,6 +119,9 @@ class Gpt4AllWebUI(GPT4AllAPI):

         self.add_endpoint("/", "", self.index, methods=["GET"])
         self.add_endpoint("/<path:filename>", "serve_static", self.serve_static, methods=["GET"])
+
+        self.add_endpoint("/images/<path:filename>", "serve_images", self.serve_images, methods=["GET"])
+        self.add_endpoint("/bindings/<path:filename>", "serve_bindings", self.serve_bindings, methods=["GET"])
         self.add_endpoint("/personalities/<path:filename>", "serve_personalities", self.serve_personalities, methods=["GET"])
         self.add_endpoint("/outputs/<path:filename>", "serve_outputs", self.serve_outputs, methods=["GET"])

@@ -452,7 +455,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
             self.binding = self.process.load_binding(self.config["binding"], install=True)

         except Exception as ex:
-            print("Couldn't build binding")
+            print(f"Couldn't build binding: [{ex}]")
             return jsonify({'setting_name': data['setting_name'], "status":False, 'error':str(ex)})
         else:
             if self.config["debug"]:
@@ -508,6 +511,10 @@ class Gpt4AllWebUI(GPT4AllAPI):
             try:
                 bnd = load_config(card)
                 bnd["folder"]=f.stem
+                icon_path = Path(f/"logo.png")
+                if icon_path.exists():
+                    bnd["icon"]=str(icon_path)
+
                 bindings.append(bnd)
             except Exception as ex:
                 print(f"Couldn't load backend card : {f}\n\t{ex}")
@@ -610,6 +617,21 @@ class Gpt4AllWebUI(GPT4AllAPI):
         fn = filename.split("/")[-1]
         return send_from_directory(path, fn)

+
+    def serve_images(self, filename):
+        root_dir = os.getcwd()
+        path = os.path.join(root_dir, 'images/')+"/".join(filename.split("/")[:-1])
+
+        fn = filename.split("/")[-1]
+        return send_from_directory(path, fn)
+
+    def serve_bindings(self, filename):
+        root_dir = os.getcwd()
+        path = os.path.join(root_dir, 'bindings/')+"/".join(filename.split("/")[:-1])
+
+        fn = filename.split("/")[-1]
+        return send_from_directory(path, fn)
+
     def serve_personalities(self, filename):
         root_dir = os.getcwd()
         path = os.path.join(root_dir, 'personalities/')+"/".join(filename.split("/")[:-1])
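The two new handlers mirror the existing static routes: everything before the last `/` of the requested path is treated as a subdirectory of the working directory, and the file itself is handed to Flask's `send_from_directory`. A minimal standalone sketch of the same pattern (the app wiring here is illustrative, not the project's actual setup):

```python
import os
from flask import Flask, send_from_directory

app = Flask(__name__)

@app.route("/images/<path:filename>")
def serve_images(filename):
    # Everything before the last "/" is the subdirectory, the rest is the file.
    root_dir = os.getcwd()
    path = os.path.join(root_dir, "images/") + "/".join(filename.split("/")[:-1])
    fn = filename.split("/")[-1]
    # send_from_directory streams the file with the right MIME type
    # and refuses paths that escape the given directory.
    return send_from_directory(path, fn)

if __name__ == "__main__":
    app.run(port=5000)  # GET /images/icon.png serves ./images/icon.png
```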
@@ -838,12 +860,13 @@ class Gpt4AllWebUI(GPT4AllAPI):
                 owner_link = model.get("owner_link", 'https://github.com/ParisNeo')
                 filesize = int(model.get('filesize',0))
                 description = model.get('description',"")
+                model_type = model.get("model_type","")
                 if server.endswith("/"):
                     path = f'{server}{filename}'
                 else:
                     path = f'{server}/{filename}'
                 local_path = Path(f'./models/{self.config["binding"]}/{filename}')
-                is_installed = local_path.exists()
+                is_installed = local_path.exists() or model_type.lower()=="api"
                 models.append({
                     'title': filename,
                     'icon': image_url, # Replace with the path to the model icon
@@ -854,6 +877,7 @@ class Gpt4AllWebUI(GPT4AllAPI):
                     'isInstalled': is_installed,
                     'path': path,
                     'filesize': filesize,
+                    'model_type': model_type
                 })
             except Exception as ex:
                 print("#################################")
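The `model_type` changes above let API-backed entries show up as installed even though no weights file ever lands on disk. A condensed sketch of that check (the model card here is hypothetical):

```python
from pathlib import Path

# Hypothetical model card as it would come out of a models.yaml entry
model = {"filename": "gpt-3.5-turbo", "model_type": "api"}

model_type = model.get("model_type", "")
local_path = Path(f'./models/open_ai/{model["filename"]}')

# An api model has no local file, so it counts as installed regardless.
is_installed = local_path.exists() or model_type.lower() == "api"
print(is_installed)  # True even though local_path does not exist
```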

@@ -57,7 +57,7 @@ class CustomBinding(LLMBinding):
         Returns:
             list: A list of tokens representing the tokenized prompt.
         """
-        return self.model.tokenize(prompt.encode())
+        return None

     def detokenize(self, tokens_list):
         """
@@ -69,7 +69,7 @@ class CustomBinding(LLMBinding):
         Returns:
             str: The detokenized text as a string.
         """
-        return self.model.detokenize(tokens_list)
+        return None

     def generate(self,
                  prompt:str,
@@ -87,17 +87,22 @@ class CustomBinding(LLMBinding):
         """
         try:
             output = ""
-            self.model.reset()
-            tokens = self.model.tokenize(prompt)
             count = 0
             generated_text = """
 This is an empty binding that shows how you can build your own binding.
-Find it in bindings
+Find it in bindings.
+```python
+# This is a python snippet
+print("Hello World")
+```
+
+This is a photo
+![](/images/icon.png)
 """
-            for tok in re.split(r' |\n', generated_text):
-                if count >= n_predict or self.model.is_eos_token(tok):
+            for tok in re.split(r'(\s+)', generated_text):
+                if count >= n_predict:
                     break
-                word = self.model.detokenize(tok)
+                word = tok
                 if new_text_callback is not None:
                     if not new_text_callback(word):
                         break
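Splitting on `r'(\s+)'` instead of `r' |\n'` keeps the whitespace runs as tokens, so the callback receives the canned answer with its spacing and newlines intact. A self-contained sketch of the streaming loop the template now uses:

```python
import re

generated_text = "This is an empty binding.\nFind it in bindings."
n_predict = 128
count = 0

def new_text_callback(word: str) -> bool:
    print(word, end="", flush=True)  # a UI would append this to the chat view
    return True  # returning False aborts the stream

# The capture group makes re.split keep the separators as tokens.
for tok in re.split(r'(\s+)', generated_text):
    if count >= n_predict:
        break
    if not new_text_callback(tok):
        break
    count += 1
```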

BIN bindings/backend_template/logo.png (new file, 237 KiB)

@@ -5,3 +5,4 @@
 owner: Owner_name
 server: https://Path_to_the_server_to_load_from
 sha256: The Hash code of the file
+model_type: api # api or file

@@ -106,10 +106,10 @@ class CTRansformers(LLMBinding):
             count = 0
             for tok in self.model.generate(
                     tokens,
-                    top_k=self.config['top_k'],
-                    top_p=self.config['top_p'],
-                    temperature=self.config['temperature'],
-                    repetition_penalty=self.config['repeat_penalty'],
+                    top_k=gpt_params['top_k'],
+                    top_p=gpt_params['top_p'],
+                    temperature=gpt_params['temperature'],
+                    repetition_penalty=gpt_params['repeat_penalty'],
                     seed=self.config['seed'],
                     batch_size=1,
                     threads = self.config['n_threads'],
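This hunk, and the matching ones in the other bindings below, move the sampling settings from the global `self.config` to a per-call `gpt_params` dict, so a caller can override temperature and friends for a single generation. The diff doesn't show how `gpt_params` is built; presumably it merges caller overrides into the configured defaults, roughly like this assumed sketch (names are illustrative):

```python
# Assumed helper: per-call overrides win over the configured defaults.
DEFAULTS = {"temperature": 0.9, "top_k": 50, "top_p": 0.95, "repeat_penalty": 1.3}

def build_gpt_params(config: dict, **overrides) -> dict:
    params = {k: config.get(k, v) for k, v in DEFAULTS.items()}
    params.update(overrides)
    return params

config = {"temperature": 0.7, "top_k": 40, "top_p": 0.9, "repeat_penalty": 1.1}
gpt_params = build_gpt_params(config, temperature=0.2)
print(gpt_params)  # temperature comes out as 0.2, the per-call override
```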

BIN bindings/c_transformers/logo.png (new file, 268 KiB)

bindings/config_local.yaml (new file)
@@ -0,0 +1 @@
+pdf_file_path: ''

@@ -96,10 +96,10 @@ class GPT4ALL(LLMBinding):
             self.model._response_callback = local_callback
             self.model.generate(prompt,
                                 n_predict=n_predict,
-                                temp=self.config['temperature'],
-                                top_k=self.config['top_k'],
-                                top_p=self.config['top_p'],
-                                repeat_penalty=self.config['repeat_penalty'],
+                                temp=gpt_params["temp"],
+                                top_k=gpt_params['top_k'],
+                                top_p=gpt_params['top_p'],
+                                repeat_penalty=gpt_params['repeat_penalty'],
                                 repeat_last_n = self.config['repeat_last_n'],
                                 # n_threads=self.config['n_threads'],
                                 streaming=False,

BIN bindings/gpt_4all/logo.png (new file, 22 KiB)

@@ -80,10 +80,10 @@ class GptJ(LLMBinding):
             output = ""
             for tok in self.model.generate(prompt,
                                            n_predict=n_predict,
-                                           temp=self.config['temperature'],
-                                           top_k=self.config['top_k'],
-                                           top_p=self.config['top_p'],
-                                           #repeat_penalty=self.config['repeat_penalty'],
+                                           temp=gpt_params["temp"],
+                                           top_k=gpt_params['top_k'],
+                                           top_p=gpt_params['top_p'],
+                                           #repeat_penalty=gpt_params['repeat_penalty'],
                                            #repeat_last_n = self.config['repeat_last_n'],
                                            n_threads=self.config['n_threads'],
                                            ):

@@ -83,10 +83,10 @@ class GPTJ(LLMBinding):
                 seed=self.config['seed'],
                 n_threads=self.config['n_threads'],
                 n_predict=n_predict,
-                top_k=self.config['top_k'],
-                top_p=self.config['top_p'],
-                temp=self.config['temperature'],
-                repeat_penalty=self.config['repeat_penalty'],
+                top_k=gpt_params['top_k'],
+                top_p=gpt_params['top_p'],
+                temp=gpt_params["temp"],
+                repeat_penalty=gpt_params['repeat_penalty'],
                 repeat_last_n=self.config['repeat_last_n'],
                 n_batch=8,
                 reset=True,

BIN bindings/gptq/logo.png (new file, 357 KiB)

@@ -87,10 +87,10 @@ class LLAMACPP(LLMBinding):
             tokens = self.model.tokenize(prompt.encode())
             count = 0
             for tok in self.model.generate(tokens,
-                                           temp=self.config['temperature'],
-                                           top_k=self.config['top_k'],
-                                           top_p=self.config['top_p'],
-                                           repeat_penalty=self.config['repeat_penalty'],
+                                           temp=gpt_params["temp"],
+                                           top_k=gpt_params['top_k'],
+                                           top_p=gpt_params['top_p'],
+                                           repeat_penalty=gpt_params['repeat_penalty'],
                                            ):
                 if count >= n_predict or (tok == self.model.token_eos()):
                     break

BIN bindings/llama_cpp_official/logo.png (new file, 164 KiB)

@@ -42,8 +42,10 @@ class OpenAIGPT(LLMBinding):
         # The local config can be used to store personal information that shouldn't be shared like chatgpt Key
         # or other personal information
         # This file is never commited to the repository as it is ignored by .gitignore
+        self.config = config
         self._local_config_file_path = Path(__file__).parent/"config_local.yaml"
-        self.config = load_config(self._local_config_file_path)
+        self.local_config = load_config(self._local_config_file_path)
+        openai.api_key = self.local_config["openai_key"]

         # Do your initialization stuff

@@ -57,7 +59,7 @@ class OpenAIGPT(LLMBinding):
         Returns:
             list: A list of tokens representing the tokenized prompt.
         """
-        return self.model.tokenize(prompt.encode())
+        return None

     def detokenize(self, tokens_list):
         """
@@ -69,7 +71,7 @@ class OpenAIGPT(LLMBinding):
         Returns:
             str: The detokenized text as a string.
         """
-        return self.model.detokenize(tokens_list)
+        return None

     def generate(self,
                  prompt:str,
@@ -86,35 +88,21 @@ class OpenAIGPT(LLMBinding):
             verbose (bool, optional): If true, the code will spit many informations about the generation process. Defaults to False.
         """
         try:
-            output = ""
-            self.model.reset()
-            tokens = self.model.tokenize(prompt)
-            count = 0
-            generated_text = """
-This is an empty binding that shows how you can build your own binding.
-Find it in bindings
-"""

             response = openai.Completion.create(
-                engine='text-davinci-003', # Choose the engine according to your OpenAI plan
+                engine=self.config["model"], # Choose the engine according to your OpenAI plan
                 prompt=prompt,
-                max_tokens=100, # Adjust the desired length of the generated response
+                max_tokens=n_predict, # Adjust the desired length of the generated response
                 n=1, # Specify the number of responses you want
                 stop=None, # Define a stop sequence if needed
-                temperature=0.7 # Adjust the temperature for more or less randomness in the output
+                temperature=gpt_params["temp"] # Adjust the temperature for more or less randomness in the output
             )
-            for tok in re.split(r' |\n', generated_text):
-                if count >= n_predict or self.model.is_eos_token(tok):
-                    break
-                word = self.model.detokenize(tok)
-                if new_text_callback is not None:
-                    if not new_text_callback(word):
-                        break
-                output += word
-                count += 1
+            # Extract the generated reply from the API response
+            reply = response.choices[0].text.strip()
+            return reply
         except Exception as ex:
             print(ex)
-            return output
+            return ""


     @staticmethod
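The rewritten generate drops the template's fake streaming loop and delegates to the completion endpoint of the legacy (pre-1.0) openai package. A minimal sketch of the same call outside the binding; the key, engine, and parameters below are placeholders for what the binding reads from its config:

```python
import openai

openai.api_key = "sk-..."  # the binding reads this from config_local.yaml

response = openai.Completion.create(
    engine="text-davinci-003",  # the binding passes self.config["model"]
    prompt="Say hello in one short sentence.",
    max_tokens=64,              # the binding passes n_predict
    n=1,
    stop=None,
    temperature=0.7,            # the binding passes gpt_params["temp"]
)
print(response.choices[0].text.strip())
```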

@@ -3,7 +3,7 @@ from pathlib import Path
 import requests
 from tqdm import tqdm
 from api.config import save_config
+import yaml
 class Install:
     def __init__(self, api):
         # Get the current directory
@@ -11,7 +11,7 @@ class Install:
         install_file = current_dir / ".installed"

         if not install_file.exists():
-            print("-------------- Template binding -------------------------------")
+            print("-------------- OpenAI Binding -------------------------------")
             print("This is the first time you are using this binding.")
             print("Installing ...")
             # Step 2: Install dependencies using pip from requirements.txt
@@ -24,14 +24,12 @@ class Install:

             #Create
             self._local_config_file_path = Path(__file__).parent/"config_local.yaml"
-            if not self._local_config_file_path.exists:
+            if not self._local_config_file_path.exists():
                 key = input("Please enter your Open AI Key")
                 config={
                     "openai_key":key
                 }
                 self.config = save_config(config, self._local_config_file_path)
-
-
             #Create the install file (a file that is used to insure the installation was done correctly)
             with open(install_file,"w") as f:
                 f.write("ok")
@@ -42,3 +40,23 @@
         """
         subprocess.run(["pip", "install", "torch", "torchvision", "torchaudio", "--no-cache-dir", "--index-url", "https://download.pytorch.org/whl/cu117"])

+
+    def create_config_file(self):
+        """
+        Create a config_local.yaml file with predefined data.
+
+        The function creates a config_local.yaml file with the specified data. The file is saved in the parent directory
+        of the current file.
+
+        Args:
+            None
+
+        Returns:
+            None
+        """
+        data = {
+            "pdf_file_path": ""  # Path to the PDF that will be discussed
+        }
+        path = Path(__file__).parent.parent / 'config_local.yaml'
+        with open(path, 'w') as file:
+            yaml.dump(data, file)

BIN bindings/open_ai/logo.png (new file, 6.1 KiB)

@@ -1,8 +1,39 @@
-- description: ChatGPT model
-  icon: https://www.google.fr/url?sa=i&url=https%3A%2F%2Fcommons.wikimedia.org%2Fwiki%2FFile%3AChatGPT_logo.svg&psig=AOvVaw1rUG9Bl0WfHOYRJF7LgSmA&ust=1685107628710000&source=images&cd=vfe&ved=0CBEQjRxqFwoTCJDr0J7JkP8CFQAAAAAdAAAAABAE
-  filename: ChatGpt by Open AI
+- description: Most advanced language model by OpenAI based on GPT-3 architecture, offering powerful language generation capabilities.
+  icon: /bindings/open_ai/logo.png
+  filename: gpt-3.5-turbo
   license: Commercial
   owner_link: https://link_to_the_owner_web_page
   owner: Open AI
   server: https://openai.com
-  sha256: NONE
+  sha256: ~
+  model_type: api
+
+- description: Highly capable language model for generating high-quality text and performing various natural language processing tasks.
+  icon: /bindings/open_ai/logo.png
+  filename: text-davinci-003
+  license: Commercial
+  owner_link: https://link_to_the_owner_web_page
+  owner: Open AI
+  server: https://openai.com
+  sha256: ~
+  model_type: api
+
+- description: Earlier version of the text-davinci model, offering similar functionality with potentially slightly different performance characteristics.
+  icon: /bindings/open_ai/logo.png
+  filename: text-davinci-002
+  license: Commercial
+  owner_link: https://link_to_the_owner_web_page
+  owner: Open AI
+  server: https://openai.com
+  sha256: ~
+  model_type: api
+
+- description: Original version of the text-davinci model, providing strong language generation capabilities.
+  icon: /bindings/open_ai/logo.png
+  filename: text-davinci-001
+  license: Commercial
+  owner_link: https://link_to_the_owner_web_page
+  owner: Open AI
+  server: https://openai.com
+  sha256: ~
+  model_type: api

@@ -1 +1,2 @@
 openai
+tiktoken
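tiktoken ships OpenAI's BPE vocabularies, which would let the binding count tokens locally even though its tokenize/detokenize methods are stubbed to return None above. A possible use, not wired up anywhere in this diff:

```python
import tiktoken

# Pick the encoding that matches the configured model.
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

tokens = enc.encode("Hello world")  # list of token ids
text = enc.decode(tokens)           # round-trips to "Hello world"
print(len(tokens), text)
```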

@@ -83,10 +83,10 @@ class PyLLAMACPP(LLMBinding):
             output = ""
             for tok in self.model.generate(prompt,
                                            n_predict=n_predict,
-                                           temp=self.config['temperature'],
-                                           top_k=self.config['top_k'],
-                                           top_p=self.config['top_p'],
-                                           repeat_penalty=self.config['repeat_penalty'],
+                                           temp=gpt_params['temperature'],
+                                           top_k=gpt_params['top_k'],
+                                           top_p=gpt_params['top_p'],
+                                           repeat_penalty=gpt_params['repeat_penalty'],
                                            repeat_last_n = self.config['repeat_last_n'],
                                            n_threads=self.config['n_threads'],
                                            ):

BIN bindings/py_llama_cpp/logo.png (new file, 237 KiB)

@@ -4,4 +4,4 @@ In this short video we will look at bindings and models, how to select them and

 First, to select a binding, go to the settings tab then Bindings zoo.

-You will find a certain numbre of
+You will find a certain numbre of bindings. Each binding as built by a person or a team. Make sure to visit their

BIN images/icon.png (new file, 37 KiB)

@@ -31,6 +31,11 @@
                 <b>Version: </b>
                 {{ binding.version }}
             </div>
+            <a :href="binding.link" target="_blank" class="flex items-center">
+                <i data-feather="github" class="w-5 m-1"></i>
+                <b>Link: </b>
+                {{ binding.link }}
+            </a>
         </div>
         <div class="flex items-center">
             <i data-feather="info" class="w-5 m-1"></i>
@@ -61,7 +61,7 @@
         <p class="mx-1 opacity-80">{{ description }}</p>
     </div>
     <div class="flex-shrink-0" >
-        <button class="px-4 py-2 rounded-md text-white font-bold transition-colors duration-300"
+        <button v-if="model_type !== 'api'" class="px-4 py-2 rounded-md text-white font-bold transition-colors duration-300"
             :class="[isInstalled ? 'bg-red-500 hover:bg-red-600' : linkNotValid ? 'bg-gray-500 hover:bg-gray-600' : 'bg-green-500 hover:bg-green-600']"
             :disabled="installing || uninstalling" @click.stop="toggleInstall">
             <template v-if="installing">
@@ -84,6 +84,7 @@
                 {{ isInstalled ? model.isCustomModel ? 'Delete' : 'Uninstall' : linkNotValid ? 'Link is not valid':'Install' }}
             </template>
         </button>
+
     </div>

 </div>
@@ -110,7 +111,8 @@ export default {
     onUninstall: Function,
     onSelected: Function,
     selected: Boolean,
-    model: Object
+    model: Object,
+    model_type: String
   },
   data() {
     return {
@@ -136,8 +138,9 @@ export default {
       return filesize(size)
     },
     async getFileSize(url) {
+      console.log(this.model_type);
+      if(this.model_type!="api"){
       try {
-
         const res = await axios.head(url)
         //console.log("addddd",url, res.headers)
         if (res) {
@@ -183,6 +186,8 @@ export default {
         return 'Could not be determined'

       }
+      }
+

     },
     getImgUrl() {

@@ -179,7 +179,7 @@
                     :title="model.title" :icon="model.icon" :path="model.path" :owner="model.owner"
                     :owner_link="model.owner_link" :license="model.license" :description="model.description"
                     :is-installed="model.isInstalled" :on-install="onInstall" :on-uninstall="onUninstall"
-                    :on-selected="onSelected" :selected="model.title === configFile.model" :model="model" />
+                    :on-selected="onSelected" :selected="model.title === configFile.model" :model="model" :model_type="model.model_type" />
                 </TransitionGroup>
             </div>
         </div>