---
name: "vllm"

config_file: |
    context_size: 8192
    parameters:
      max_tokens: 8192
    backend: vllm
    function:
      disable_no_action: true
      grammar:
        disable: true
        parallel_calls: true
        expect_strings_after_json: true
    template:
      use_tokenizer_template: true
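    # Note: this template does not pin a model. To point the backend at one,
    # add a `model` entry with a HuggingFace repository ID under the
    # `parameters` block above (a minimal sketch; the repository name is
    # illustrative, not a recommendation):
    #   model: "facebook/opt-125m"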
    # Uncomment to specify a quantization method (optional)
    # quantization: "awq"
    # Uncomment to set the dtype; choices are: "auto", "half", "float16", "bfloat16", "float", "float32". Note that AWQ on vLLM does not support bfloat16.
    # dtype: "float16"
    # Uncomment to limit GPU memory utilization (the vLLM default is 0.9, i.e. 90%)
    # gpu_memory_utilization: 0.5
    # Uncomment to trust remote code from Hugging Face repositories
    # trust_remote_code: true
    # Uncomment to enforce eager execution (disables CUDA graph capture)
    # enforce_eager: true
    # Uncomment to specify the size of the CPU swap space per GPU (in GiB)
    # swap_space: 2
    # Uncomment to specify the maximum length of a sequence (including prompt and output)
    # max_model_len: 32768
    # Uncomment to set the tensor parallel size, i.e. the number of GPUs
    # the model is partitioned across. This lets you run models that are too
    # large for a single GPU, but performance gains are limited.
    # https://github.com/vllm-project/vllm/issues/1435
    # tensor_parallel_size: 2
    # Uncomment to disable log stats
    # disable_log_stats: true
    # Uncomment to set multi-modal input limits per prompt; defaults to 1 per modality if not specified
    # limit_mm_per_prompt:
    #   image: 2
    #   video: 2
    #   audio: 2
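    # Once installed and running, the model can be queried through LocalAI's
    # OpenAI-compatible chat endpoint (a sketch; host, port, and model name
    # depend on your deployment):
    # curl http://localhost:8080/v1/chat/completions \
    #   -H "Content-Type: application/json" \
    #   -d '{"model": "vllm", "messages": [{"role": "user", "content": "Hello"}]}'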