# =================== Lord Of Large Language Models Configuration file ===========================
version: 46
binding_name: null
model_name: null

# Host information
host: localhost
port: 9600

# Generation parameters
discussion_prompt_separator: "!@>"
seed: -1
n_predict: 1024
ctx_size: 4084
min_n_predict: 512
temperature: 0.9
top_k: 50
top_p: 0.95
repeat_last_n: 40
repeat_penalty: 1.2
n_threads: 8

# Personality parameters
personalities: ["generic/lollms"]
active_personality_id: 0
override_personality_model_parameters: false # if true, the personality parameters are overridden by those of the configuration (may affect personality behaviour)
extensions: []

user_name: user
user_description: ""
use_user_name_in_discussions: false
user_avatar: default_user.svg
use_user_informations_in_discussion: false

# UI parameters
db_path: database.db

# Automatic updates
debug: false
auto_update: true
auto_sync_personalities: true
auto_sync_extensions: true
auto_sync_bindings: true
auto_sync_models: true
auto_save: true
auto_title: false

# Install mode (cpu, cpu-noavx, nvidia-tensorcores, nvidia, amd-noavx, amd, apple-intel, apple-silicon)
hardware_mode: nvidia-tensorcores

# Automatically open the browser
auto_show_browser: true

# Voice service
enable_voice_service: false
xtts_base_url: http://127.0.0.1:8020
auto_read: false
current_voice: null
current_language: en

# Image generation service
enable_sd_service: false
sd_base_url: http://127.0.0.1:7860

# ollama service
enable_ollama_service: false
ollama_base_url: http://0.0.0.0:11434

# petals service
enable_petals_service: false
petals_base_url: http://0.0.0.0:8010

# lollms service
enable_lollms_service: false
lollms_base_url: http://0.0.0.0:1234

# Audio
media_on: false
audio_in_language: 'en-US'
auto_speak: false
audio_out_voice: null
audio_pitch: 1
audio_auto_send_input: true
audio_silenceTimer: 5000

# Data vectorization
use_discussions_history: false # Activate vectorizing previous conversations
summerize_discussion: false # Activate discussion summary (better, but adds computation time)
max_summary_size: 512 # in tokens
data_vectorization_visualize_on_vectorization: false
use_files: true # Activate using files
data_vectorization_activate: true # To activate/deactivate data vectorization
data_vectorization_method: "tfidf_vectorizer" # "model_embedding" or "tfidf_vectorizer"
data_visualization_method: "PCA" # "PCA" or "TSNE"
data_vectorization_save_db: false # If false, each new session starts with a fresh vectorization database
data_vectorization_chunk_size: 512 # chunk size
data_vectorization_overlap_size: 128 # overlap between chunks
data_vectorization_nb_chunks: 2 # number of chunks to use
data_vectorization_build_keys_words: false # If true, when querying the database, keywords generated from the user prompt are used instead of the prompt itself
data_vectorization_force_first_chunk: false # If true, the first chunk of the document will systematically be used
data_vectorization_make_persistance: false # If true, the data will be persistent between runs

# Helpers
pdf_latex_path: null

# Boosting information
positive_boost: null
negative_boost: null
force_output_language_to_be: null
fun_mode: false
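
# Illustrative note on the chunking values above (an assumption, not stated in this file:
# a standard sliding-window chunker where sizes are counted in tokens):
# with data_vectorization_chunk_size: 512 and data_vectorization_overlap_size: 128,
# consecutive chunks start 512 - 128 = 384 tokens apart, so a 1024-token document
# would yield chunks beginning at offsets 0, 384 and 768, and
# data_vectorization_nb_chunks: 2 would select the two best-matching chunks per query.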