Mirror of https://github.com/ParisNeo/lollms-webui.git, synced 2024-12-24 06:36:37 +00:00
Bugfix: read the sampling temperature from gpt_params["temperature"] instead of the obsolete "temp" key

commit 145437ced0 (parent b396d57e38)
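Every hunk below fixes the same mistake: the shared gpt_params dictionary carries the sampling temperature under the key "temperature", but the bindings were still reading the old "temp" key, which would raise a KeyError (or silently use a stale value if both keys were ever present). A minimal sketch of the failure mode, assuming gpt_params is a plain dict built from the web UI settings; the literal values are made up for illustration, only the key names come from the diff:

    # Hypothetical stand-in for the gpt_params dict that lollms-webui builds
    # from the UI settings; values are assumptions, key names are from the diff.
    gpt_params = {
        "temperature": 0.7,
        "top_k": 40,
        "top_p": 0.9,
        "repeat_penalty": 1.1,
    }

    print(gpt_params["temperature"])  # 0.7 -- the key every binding must read now
    print(gpt_params["temp"])         # KeyError: 'temp' -- the bug this commit fixes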
@@ -96,7 +96,7 @@ class GPT4ALL(LLMBinding):
             self.model._response_callback = local_callback
             self.model.generate(prompt,
                                 n_predict=n_predict,
-                                temp=gpt_params["temp"],
+                                temp=gpt_params["temperature"],
                                 top_k=gpt_params['top_k'],
                                 top_p=gpt_params['top_p'],
                                 repeat_penalty=gpt_params['repeat_penalty'],
@@ -80,7 +80,7 @@ class GptJ(LLMBinding):
             output = ""
             for tok in self.model.generate(prompt,
                                            n_predict=n_predict,
-                                           temp=gpt_params["temp"],
+                                           temp=gpt_params["temperature"],
                                            top_k=gpt_params['top_k'],
                                            top_p=gpt_params['top_p'],
                                            #repeat_penalty=gpt_params['repeat_penalty'],
@@ -85,7 +85,7 @@ class GPTJ(LLMBinding):
                                            n_predict=n_predict,
                                            top_k=gpt_params['top_k'],
                                            top_p=gpt_params['top_p'],
-                                           temp=gpt_params["temp"],
+                                           temp=gpt_params["temperature"],
                                            repeat_penalty=gpt_params['repeat_penalty'],
                                            repeat_last_n=self.config['repeat_last_n'],
                                            n_batch=8,
@@ -87,7 +87,7 @@ class LLAMACPP(LLMBinding):
             tokens = self.model.tokenize(prompt.encode())
             count = 0
             for tok in self.model.generate(tokens,
-                                           temp=gpt_params["temp"],
+                                           temp=gpt_params["temperature"],
                                            top_k=gpt_params['top_k'],
                                            top_p=gpt_params['top_p'],
                                            repeat_penalty=gpt_params['repeat_penalty'],
@@ -94,7 +94,7 @@ class OpenAIGPT(LLMBinding):
             max_tokens=n_predict,  # Adjust the desired length of the generated response
             n=1,  # Specify the number of responses you want
             stop=None,  # Define a stop sequence if needed
-            temperature=gpt_params["temp"]  # Adjust the temperature for more or less randomness in the output
+            temperature=gpt_params["temperature"]  # Adjust the temperature for more or less randomness in the output
         )

         # Extract the generated reply from the API response
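Note the naming asymmetry the fix has to respect: the local bindings keep the keyword temp in their generate() calls, because that is the parameter name of each model library, while the OpenAI API call names its parameter temperature; in every case the value is now read from gpt_params["temperature"]. A more defensive alternative, which is not what this commit does, would be to accept either key during the transition:

    # Hypothetical fallback lookup; 0.7 is an assumed default, not from the repo.
    temperature = gpt_params.get("temperature", gpt_params.get("temp", 0.7))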