name: codellama-7b
# Load the model through the Hugging Face transformers backend.
backend: transformers
type: AutoModelForCausalLM
parameters:
  # Hugging Face model ID (fetched from the Hub if not cached locally).
  model: codellama/CodeLlama-7b-hf
  # Low temperature keeps code completions close to greedy decoding.
  temperature: 0.2
  # Sample from at most the 40 highest-probability tokens per step,
  top_k: 40
  # restricted to the smallest set covering 95% of the probability mass.
  top_p: 0.95

usage: |
  curl http://localhost:8080/v1/completions -H "Content-Type: application/json" -d '{
    "model": "codellama-7b",
    "prompt": "import socket\n\ndef ping_exponential_backoff(host: str):"
  }'
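
# The same request from Python, as a minimal sketch using the openai client
# (>= 1.0). Assumes the server exposes the OpenAI-compatible API on
# localhost:8080 as in the curl example above; the api_key value is a
# placeholder for servers that do not check it.
#
#   from openai import OpenAI
#
#   client = OpenAI(base_url="http://localhost:8080/v1", api_key="not-needed")
#   resp = client.completions.create(
#       model="codellama-7b",
#       prompt="import socket\n\ndef ping_exponential_backoff(host: str):",
#       max_tokens=128,
#   )
#   print(resp.choices[0].text)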