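# LocalAI model configuration for LLaVA 1.6 (Mistral 7B): a llama.cpp-served
# vision model exposed through the OpenAI-compatible chat completions API.
# Drop this file into LocalAI's models directory; the GGUF files it needs are
# listed under `download_files` and an example request under `usage`.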
backend: llama-cpp       # serve the model with the llama.cpp backend
context_size: 4096       # prompt context window, in tokens
f16: true                # run in 16-bit float (F16) mode

gpu_layers: 90           # number of layers to offload to the GPU
mmap: true               # memory-map the model file instead of loading it fully into RAM
name: llava-1.6-mistral  # model name to reference in API requests

roles:
  user: "USER:"
  assistant: "ASSISTANT:"
  system: "SYSTEM:"

# The mmproj file is the multimodal (vision) projector that pairs with the
# language model below; both files are fetched via `download_files`.
mmproj: llava-v1.6-7b-mmproj-f16.gguf
parameters:
  model: llava-v1.6-mistral-7b.gguf

template:
  chat: |
    A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.
    {{.Input}}
    ASSISTANT:
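
# Illustrative sketch only (assumed rendering, not taken from the upstream file):
# LocalAI replaces {{.Input}} with the conversation, each message prefixed by its
# role string from `roles` above, so a single user turn reaches llama.cpp roughly as:
#
#   A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.
#   USER: What is in the image?
#   ASSISTANT: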

# Files to download if not already present: the Q6_K-quantized LLaVA 1.6
# Mistral-7B weights and the f16 multimodal projector referenced above.
download_files:
- filename: llava-v1.6-mistral-7b.gguf
  uri: huggingface://cjpais/llava-1.6-mistral-7b-gguf/llava-v1.6-mistral-7b.Q6_K.gguf
- filename: llava-v1.6-7b-mmproj-f16.gguf
  uri: huggingface://cjpais/llava-1.6-mistral-7b-gguf/mmproj-model-f16.gguf

usage: |
    curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
        "model": "llava-1.6-mistral",
        "messages": [{"role": "user", "content": [{"type": "text", "text": "What is in the image?"}, {"type": "image_url", "image_url": {"url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"}}]}],
        "temperature": 0.9}'
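
# Sketch of an alternative request that sends a local image, assuming the endpoint
# accepts OpenAI-style base64 data URIs for images (`local.jpg` is illustrative):
#
#   IMG=$(base64 -w 0 local.jpg)   # GNU coreutils; on macOS use: base64 -i local.jpg
#   curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
#     "model": "llava-1.6-mistral",
#     "messages": [{"role": "user", "content": [
#       {"type": "text", "text": "Describe this picture."},
#       {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,'"$IMG"'"}}]}],
#     "temperature": 0.9}'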