From 2a6f70bd876f885b73bc6880197e74207d7b4c48 Mon Sep 17 00:00:00 2001
From: Saifeddine ALOUI
Date: Mon, 29 May 2023 21:26:20 +0200
Subject: [PATCH] bugfixed

---
 api/__init__.py               |  6 +++---
 bindings/gpt_4all/__init__.py | 10 +++++-----
 docs/youtube/script_models.md |  3 +++
 3 files changed, 11 insertions(+), 8 deletions(-)
 create mode 100644 docs/youtube/script_models.md

diff --git a/api/__init__.py b/api/__init__.py
index 28d5afb8..311281cf 100644
--- a/api/__init__.py
+++ b/api/__init__.py
@@ -369,8 +369,8 @@ class ModelProcess:
 
     def _callback(self, text, text_type=0):
         try:
-            print(str(text),end="", flush=True)
-        except:
+            print(str(text), end="", flush=True)
+        except Exception as ex:
             print(".")
         self.curent_text += text
         detected_anti_prompt = False
@@ -388,7 +388,7 @@ class ModelProcess:
             # Stream the generated text to the main process
             self.generation_queue.put((text,self.id, text_type))
             # if stop generation is detected then stop
-            if self.completion_signal.is_set():
+            if not self.completion_signal.is_set():
                 return True
             else:
                 return False
diff --git a/bindings/gpt_4all/__init__.py b/bindings/gpt_4all/__init__.py
index 5430ecfa..edb3d485 100644
--- a/bindings/gpt_4all/__init__.py
+++ b/bindings/gpt_4all/__init__.py
@@ -82,12 +82,12 @@ class GPT4ALL(LLMBinding):
             verbose (bool, optional): If true, the code will spit many informations about the generation process. Defaults to False.
         """
         try:
-            response_tokens = []
+            response_text = []
             def local_callback(token_id, response):
-                decoded_token = response.decode('utf-8')
-                response_tokens.append( decoded_token );
+                decoded_word = response.decode('utf-8')
+                response_text.append( decoded_word )
                 if new_text_callback is not None:
-                    if not new_text_callback(''.join(response_tokens)):
+                    if not new_text_callback(decoded_word):
                         return False
 
                 # Do whatever you want with decoded_token here.
@@ -106,7 +106,7 @@ class GPT4ALL(LLMBinding):
             )
         except Exception as ex:
             print(ex)
-        return ''.join(response_tokens)
+        return ''.join(response_text)
 
     @staticmethod
     def get_available_models():
diff --git a/docs/youtube/script_models.md b/docs/youtube/script_models.md
new file mode 100644
index 00000000..cce34c80
--- /dev/null
+++ b/docs/youtube/script_models.md
@@ -0,0 +1,3 @@
+Hi there, welcome to a snippet about bindings and models selection.
+
+In this short video we will look at bindings and models
\ No newline at end of file