# =================== Lord Of Large Language Models Configuration file ===========================
version: 27
binding_name: null
model_name: null

# Enables GPU usage
enable_gpu: true

# Host information
host: localhost
port: 9601

# Generation parameters
discussion_prompt_separator: "!@>"
seed: -1
n_predict: 1024
ctx_size: 4048
min_n_predict: 256
temperature: 0.9
top_k: 50
top_p: 0.95
repeat_last_n: 40
repeat_penalty: 1.2
n_threads: 8

# Personality parameters
personalities: ["generic/lollms"]
active_personality_id: 0
override_personality_model_parameters: false # if true, the personality parameters are overridden by those of this configuration (may affect personality behaviour)

# Extensions
extensions: []

# User information
user_name: user
user_description: ""
use_user_name_in_discussions: false
user_avatar: default_user

# Automatic update
auto_update: false
auto_save: true
debug: false

# Data vectorization
use_files: true # Activate the use of files
data_vectorization_visualize_on_vectorization: false # if active, the vectorizer will show the data whenever the index is called
data_vectorization_activate: true # Activate/deactivate data vectorization
data_vectorization_method: "ftidf_vectorizer" # "model_embedding" or "ftidf_vectorizer"
data_visualization_method: "PCA" # "PCA" or "TSNE"
data_vectorization_save_db: false # if false, the vectorization database is rebuilt from new files for each new session
data_vectorization_chunk_size: 512 # chunk size
data_vectorization_overlap_size: 128 # overlap size between chunks
data_vectorization_nb_chunks: 2 # number of chunks to use
data_vectorization_build_keys_words: false # If true, when querying the database, keywords generated from the user prompt are used instead of the prompt itself.
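
# The commented sketch below is not part of the configuration itself; it only illustrates one
# possible way to load this file from Python using PyYAML. The file path "configs/config.yaml"
# is an assumption for the example, not something defined by this configuration.
#
#   import yaml
#
#   # Load the configuration into a plain dictionary
#   with open("configs/config.yaml", "r") as f:
#       config = yaml.safe_load(f)
#
#   # Individual parameters are then available by key, e.g.:
#   #   config["ctx_size"]        -> 4048
#   #   config["personalities"]   -> ["generic/lollms"]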