models(gallery): add llama3-32k (#2183)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

parent 93ca56086e
commit ea13863221

gallery/chatml.yaml (new file, 41 lines)
@@ -0,0 +1,41 @@
---
name: "chatml"

config_file: |
  mmap: true
  template:
    chat_message: |
      <|im_start|>{{ .RoleName }}
      {{- if .FunctionCall }}
      Function call:
      {{- else if eq .RoleName "tool" }}
      Function response:
      {{- end }}
      {{- if .Content}}
      {{.Content }}
      {{- end }}
      {{- if .FunctionCall}}
      {{toJson .FunctionCall}}
      {{- end }}
      <|im_end|>
    function: |
      <|im_start|>system
      You are a function calling AI model. You are provided with functions to execute. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools:
      {{range .Functions}}
      {'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
      {{end}}
      For each function call return a json object with function name and arguments
      <|im_end|>
      {{.Input -}}
      <|im_start|>assistant
      <tool_call>
    chat: |
      {{.Input -}}
      <|im_start|>assistant
    completion: |
      {{.Input}}
  context_size: 4096
  f16: true
  stopwords:
  - <|im_end|>
  - <dummy32000>
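
The chat_message block above is a Go text/template, the syntax LocalAI uses for its prompt templates. Below is a minimal sketch, not part of the commit, of how that template turns a single user turn into ChatML. The Message struct and the toJson helper are illustrative stand-ins; only the field names (.RoleName, .Content, .FunctionCall) come from the template itself.

    // Minimal sketch (not part of the commit): render the chat_message
    // template above with Go's text/template. Message and toJson are
    // illustrative stand-ins for whatever LocalAI passes to the template.
    package main

    import (
        "encoding/json"
        "os"
        "text/template"
    )

    type Message struct {
        RoleName     string
        Content      string
        FunctionCall any // nil for a plain text turn
    }

    const chatMessageTmpl = `<|im_start|>{{ .RoleName }}
    {{- if .FunctionCall }}
    Function call:
    {{- else if eq .RoleName "tool" }}
    Function response:
    {{- end }}
    {{- if .Content}}
    {{.Content }}
    {{- end }}
    {{- if .FunctionCall}}
    {{toJson .FunctionCall}}
    {{- end }}
    <|im_end|>`

    func main() {
        t := template.Must(template.New("chat_message").
            Funcs(template.FuncMap{
                // Illustrative toJson helper; the real one is registered by LocalAI.
                "toJson": func(v any) string {
                    b, _ := json.Marshal(v)
                    return string(b)
                },
            }).Parse(chatMessageTmpl))

        // A plain user turn renders as:
        //   <|im_start|>user
        //   How big is the context window?
        //   <|im_end|>
        _ = t.Execute(os.Stdout, Message{RoleName: "user", Content: "How big is the context window?"})
    }

A turn carrying a function call would instead take the .FunctionCall branch and emit the call as JSON between the same ChatML markers.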
@@ -412,6 +412,25 @@
    - filename: dolphin-2.9-llama3-8b-q6_K.gguf
      sha256: 8aac72a0bd72c075ba7be1aa29945e47b07d39cd16be9a80933935f51b57fb32
      uri: huggingface://cognitivecomputations/dolphin-2.9-llama3-8b-gguf/dolphin-2.9-llama3-8b-q6_K.gguf
- url: "github:mudler/LocalAI/gallery/chatml.yaml@master"
  name: "llama-3-8b-instruct-dpo-v0.3-32k"
  license: llama3
  urls:
    - https://huggingface.co/MaziyarPanahi/Llama-3-8B-Instruct-DPO-v0.3-32k-GGUF
  tags:
    - llm
    - gguf
    - gpu
    - cpu
    - llama3
  overrides:
    context_size: 32768
    parameters:
      model: Llama-3-8B-Instruct-DPO-v0.3.Q4_K_M.gguf
  files:
    - filename: Llama-3-8B-Instruct-DPO-v0.3.Q4_K_M.gguf
      sha256: 694c55b5215d03e59626cd4292076eaf31610ef27ba04737166766baa75d889f
      uri: huggingface://MaziyarPanahi/Llama-3-8B-Instruct-DPO-v0.3-32k-GGUF/Llama-3-8B-Instruct-DPO-v0.3.Q4_K_M.gguf
## LLama2 and derivatives
### Start Fimbulvetr
- &vicuna-chat
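
The new entry reuses the chatml.yaml base config added above and overrides its context_size (4096 in the base file, 32768 here) for the 32k variant. Below is a minimal sketch, not part of the commit, of pulling the model through LocalAI's /models/apply gallery endpoint; the base URL, port, and the "localai@" gallery prefix are assumptions about a local setup, so check your instance's gallery configuration for the exact identifier.

    // Minimal sketch (not part of the commit): request installation of the
    // gallery entry added above from a running LocalAI instance.
    package main

    import (
        "bytes"
        "fmt"
        "net/http"
    )

    func main() {
        // Model name matches the gallery index entry; the "localai@" prefix
        // is an assumed gallery identifier.
        body := []byte(`{"id": "localai@llama-3-8b-instruct-dpo-v0.3-32k"}`)

        resp, err := http.Post(
            "http://localhost:8080/models/apply", // assumed local LocalAI instance
            "application/json",
            bytes.NewReader(body),
        )
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        // LocalAI answers with a job reference that can be polled while the
        // GGUF file listed under files: is downloaded and verified.
        fmt.Println("apply request status:", resp.Status)
    }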