diff --git a/lollms/app.py b/lollms/app.py
index 7c14ad1..cabc23f 100644
--- a/lollms/app.py
+++ b/lollms/app.py
@@ -921,7 +921,7 @@ class LollmsApplication(LoLLMsCom):
                     else:
                         query = current_message.content
                     try:
-                        docs, sorted_similarities, document_ids = self.personality.persona_data_vectorizer.recover_text(query, top_k=self.config.data_vectorization_nb_chunks)
+                        docs, sorted_similarities, document_ids = self.personality.persona_data_vectorizer.recover_text(query, top_k=int(self.config.data_vectorization_nb_chunks))
                         for doc, infos, doc_id in zip(docs, sorted_similarities, document_ids):
                             if self.config.data_vectorization_put_chunk_informations_into_context:
                                 documentation += f"{start_header_id_template}document chunk{end_header_id_template}\nchunk_infos:{infos}\ncontent:{doc}\n"
@@ -959,7 +959,7 @@ class LollmsApplication(LoLLMsCom):
                     else:
                         documentation += f"{start_header_id_template}chunk{end_header_id_template}\n{content}\n"
 
-                docs, sorted_similarities, document_ids = client.discussion.vectorizer.recover_text(query, top_k=self.config.data_vectorization_nb_chunks)
+                docs, sorted_similarities, document_ids = client.discussion.vectorizer.recover_text(query, top_k=int(self.config.data_vectorization_nb_chunks))
                 for doc, infos in zip(docs, sorted_similarities):
                     if self.config.data_vectorization_force_first_chunk and len(client.discussion.vectorizer.chunks)>0 and infos[0]==doc_id:
                         continue
diff --git a/lollms/functions/knowledge/build_knowledge_db.py b/lollms/functions/knowledge/build_knowledge_db.py
index d609e4f..facb782 100644
--- a/lollms/functions/knowledge/build_knowledge_db.py
+++ b/lollms/functions/knowledge/build_knowledge_db.py
@@ -89,7 +89,7 @@ def buildKnowledgeDB(llm:APScript, data_store:TextVectorizer, data_folder_path:s
         qna_list=[]
         # Perform further processing with questions_vector
         for index, question in enumerate(questions_vector):
-            docs, sorted_similarities, document_ids = data_store.recover_text(question, top_k=llm.personality_config.data_vectorization_nb_chunks)
+            docs, sorted_similarities, document_ids = data_store.recover_text(question, top_k=int(llm.personality_config.data_vectorization_nb_chunks))
             if llm.personality_config.use_enhanced_mode:
                 llm.step_start(f"Verifying RAG data_{index}")
                 prompt_text = """{llm.config.start_header_id_template}chunk: {{chunk}}