From 1a26259e86396dafe2151b7d5b2d0972b6843127 Mon Sep 17 00:00:00 2001
From: Saifeddine ALOUI
Date: Sun, 8 Dec 2024 01:33:57 +0100
Subject: [PATCH] fixed problems

---
 configs/config.yaml        |  2 +-
 lollms/configs/config.yaml |  2 +-
 lollms/personality.py      | 12 ++++++------
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/configs/config.yaml b/configs/config.yaml
index 80f5ae7..3eff8d3 100644
--- a/configs/config.yaml
+++ b/configs/config.yaml
@@ -51,7 +51,7 @@
 use_continue_message: true
 seed: -1
 ctx_size: 4084
-max_n_predict: None
+max_n_predict: 4084
 min_n_predict: 1024
 temperature: 0.9
 top_k: 50
diff --git a/lollms/configs/config.yaml b/lollms/configs/config.yaml
index 80f5ae7..3eff8d3 100644
--- a/lollms/configs/config.yaml
+++ b/lollms/configs/config.yaml
@@ -51,7 +51,7 @@
 use_continue_message: true
 seed: -1
 ctx_size: 4084
-max_n_predict: None
+max_n_predict: 4084
 min_n_predict: 1024
 temperature: 0.9
 top_k: 50
diff --git a/lollms/personality.py b/lollms/personality.py
index 131c580..0a929c6 100644
--- a/lollms/personality.py
+++ b/lollms/personality.py
@@ -651,19 +651,19 @@ class AIPersonality:
         if debug == False:
             debug = self.config.debug
 
-        if max_generation_size is None:
-            prompt_size = self.model.tokenize(prompt)
-            max_generation_size = self.model.config.ctx_size - len(prompt_size)
-
         pr = PromptReshaper(prompt)
+
         prompt = pr.build(placeholders,
                           self.model.tokenize,
                           self.model.detokenize,
-                          self.model.config.ctx_size - max_generation_size,
+                          self.model.config.ctx_size - max_generation_size if max_generation_size else self.model.config.ctx_size - self.model.config.min_n_predict,
                           sacrifice
                           )
         ntk = len(self.model.tokenize(prompt))
-        max_generation_size = min(self.model.config.ctx_size - ntk, max_generation_size)
+        if max_generation_size:
+            max_generation_size = min(self.model.config.ctx_size - ntk, max_generation_size)
+        else:
+            max_generation_size = min(self.model.config.ctx_size - ntk, self.model.config.max_n_predict)
         # TODO : add show progress
 
         gen = self.generate_with_images(prompt, images, max_generation_size, callback=callback, show_progress=show_progress).strip().replace("</s>", "").replace("<s>", "")
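
Review note (illustrative, not part of the patch): below is a minimal standalone sketch of the budget-resolution logic this patch introduces. The Config dataclass, the resolve_generation_size helper, and the hard-coded token counts are assumptions made for the example; only the field names (ctx_size, max_n_predict, min_n_predict) come from the config files above.

# Sketch of the clamping behavior after this patch (illustrative only).
from dataclasses import dataclass
from typing import Optional

@dataclass
class Config:
    ctx_size: int = 4084        # matches configs/config.yaml above
    max_n_predict: int = 4084   # was None before the patch, now an int
    min_n_predict: int = 1024

def resolve_generation_size(config: Config, prompt_tokens: int,
                            max_generation_size: Optional[int]) -> int:
    # Mirrors the patched personality.py logic: an explicit budget is
    # clamped to the context left after the prompt; with no budget, fall
    # back to the configured max_n_predict.
    if max_generation_size:
        return min(config.ctx_size - prompt_tokens, max_generation_size)
    return min(config.ctx_size - prompt_tokens, config.max_n_predict)

if __name__ == "__main__":
    cfg = Config()
    # No explicit budget: limited by remaining context (4084 - 3000 = 1084).
    print(resolve_generation_size(cfg, 3000, None))  # 1084
    # Explicit budget smaller than remaining context: kept as-is.
    print(resolve_generation_size(cfg, 3000, 512))   # 512

Note that the fallback branch added in personality.py reads config.max_n_predict, so min() would raise a TypeError if that field were still None; this is presumably why the config change accompanies the code change.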