---
config_file: |
  backend: llama-cpp
  context_size: 8192
  f16: false
  name: cerbero

  template:
    completion: "{{.Input}}"
    chat: "Questa è una conversazione tra un umano ed un assistente AI.\n{{.Input}}\n[|Assistente|]  "
  roles:
    user: "[|Umano|] "
    system: "[|Umano|] "
    assistant: "[|Assistente|] "

  stopwords:
  - "[|Umano|]"

  trimsuffix:
  - "\n"