Bugfix: read the temperature setting from gpt_params["temperature"] instead of gpt_params["temp"]

Saifeddine ALOUI 2023-05-30 01:07:46 +02:00
parent b396d57e38
commit 145437ced0
5 changed files with 5 additions and 5 deletions

@@ -96,7 +96,7 @@ class GPT4ALL(LLMBinding):
self.model._response_callback = local_callback
self.model.generate(prompt,
n_predict=n_predict,
-                        temp=gpt_params["temp"],
+                        temp=gpt_params["temperature"],
top_k=gpt_params['top_k'],
top_p=gpt_params['top_p'],
repeat_penalty=gpt_params['repeat_penalty'],

@@ -80,7 +80,7 @@ class GptJ(LLMBinding):
output = ""
for tok in self.model.generate(prompt,
n_predict=n_predict,
-                        temp=gpt_params["temp"],
+                        temp=gpt_params["temperature"],
top_k=gpt_params['top_k'],
top_p=gpt_params['top_p'],
#repeat_penalty=gpt_params['repeat_penalty'],

@@ -85,7 +85,7 @@ class GPTJ(LLMBinding):
n_predict=n_predict,
top_k=gpt_params['top_k'],
top_p=gpt_params['top_p'],
-                        temp=gpt_params["temp"],
+                        temp=gpt_params["temperature"],
repeat_penalty=gpt_params['repeat_penalty'],
repeat_last_n=self.config['repeat_last_n'],
n_batch=8,

@@ -87,7 +87,7 @@ class LLAMACPP(LLMBinding):
tokens = self.model.tokenize(prompt.encode())
count = 0
for tok in self.model.generate(tokens,
-                        temp=gpt_params["temp"],
+                        temp=gpt_params["temperature"],
top_k=gpt_params['top_k'],
top_p=gpt_params['top_p'],
repeat_penalty=gpt_params['repeat_penalty'],

@@ -94,7 +94,7 @@ class OpenAIGPT(LLMBinding):
max_tokens=n_predict, # Adjust the desired length of the generated response
n=1, # Specify the number of responses you want
stop=None, # Define a stop sequence if needed
-            temperature=gpt_params["temp"] # Adjust the temperature for more or less randomness in the output
+            temperature=gpt_params["temperature"] # Adjust the temperature for more or less randomness in the output
)
# Extract the generated reply from the API response
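
The change is the same in every binding: the parameter dictionary passed to the bindings stores the sampling temperature under the key "temperature", while the generate calls were looking it up as gpt_params["temp"]. A minimal sketch of the mismatch, assuming a gpt_params dictionary with the keys that appear in these diffs (the values are illustrative, not taken from the repository):

    # Hypothetical parameter dictionary handed to the bindings.
    gpt_params = {
        "temperature": 0.7,    # sampling temperature (illustrative default)
        "top_k": 40,
        "top_p": 0.95,
        "repeat_penalty": 1.3,
    }

    # Before the fix: there is no "temp" key, so this lookup fails with a KeyError.
    # temp = gpt_params["temp"]

    # After the fix: the value is read under its actual key and then forwarded
    # to each backend's own temperature keyword argument (temp or temperature).
    temp = gpt_params["temperature"]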