Mirror of https://github.com/mudler/LocalAI.git (synced 2024-12-24 23:06:42 +00:00)
ea330d452d
* models(gallery): add mistral-0.3 and command-r, update functions

  Also add disable_parallel_new_lines to disable newlines in the JSON output when forcing parallel tools. Some models (like mistral) might be very sensitive to that when used for function calling.

* models(gallery): add aya-23-8b

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
102 lines · 2.9 KiB · YAML
name: gpt-4
mmap: true
parameters:
  model: huggingface://NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF/Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf
context_size: 8192

stopwords:
- "<|im_end|>"
- "<dummy32000>"
- "</tool_call>"
- "<|eot_id|>"
- "<|end_of_text|>"

function:
  # disable injecting the "answer" tool
  disable_no_action: true

  grammar:
    # This allows the grammar to also return messages
    mixed_mode: true
    # Suffix to add to the grammar
    #prefix: '<tool_call>\n'
    # Force parallel calls in the grammar
    # parallel_calls: true

  return_name_in_function_response: true
  # Without grammar uncomment the lines below
  # Warning: this is relying only on the capability of the
  # LLM model to generate the correct function call.
  json_regex_match:
  - "(?s)<tool_call>(.*?)</tool_call>"
  - "(?s)<tool_call>(.*?)"
  replace_llm_results:
  # Drop the scratchpad content from responses
  - key: "(?s)<scratchpad>.*</scratchpad>"
    value: ""
  replace_function_results:
  # Replace everything that is not JSON array or object
  #
  - key: '(?s)^[^{\[]*'
    value: ""
  - key: '(?s)[^}\]]*$'
    value: ""
  - key: "'([^']*?)'"
    value: "_DQUOTE_${1}_DQUOTE_"
  - key: '\\"'
    value: "__TEMP_QUOTE__"
  - key: "\'"
    value: "'"
  - key: "_DQUOTE_"
    value: '"'
  - key: "__TEMP_QUOTE__"
    value: '"'
  # Drop the scratchpad content from responses
  - key: "(?s)<scratchpad>.*</scratchpad>"
    value: ""

template:
  chat: |
    {{.Input -}}
    <|im_start|>assistant
  chat_message: |
    <|im_start|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "tool"}}tool{{else if eq .RoleName "user"}}user{{end}}
    {{- if .FunctionCall }}
    <tool_call>
    {{- else if eq .RoleName "tool" }}
    <tool_response>
    {{- end }}
    {{- if .Content}}
    {{.Content }}
    {{- end }}
    {{- if .FunctionCall}}
    {{toJson .FunctionCall}}
    {{- end }}
    {{- if .FunctionCall }}
    </tool_call>
    {{- else if eq .RoleName "tool" }}
    </tool_response>
    {{- end }}<|im_end|>
  completion: |
    {{.Input}}
  function: |-
    <|im_start|>system
    You are a function calling AI model.
    Here are the available tools:
    <tools>
    {{range .Functions}}
    {'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
    {{end}}
    </tools>
    You should call the tools provided to you sequentially
    Please use <scratchpad> XML tags to record your reasoning and planning before you call the functions as follows:
    <scratchpad>
    {step-by-step reasoning and plan in bullet points}
    </scratchpad>
    For each function call return a json object with function name and arguments within <tool_call> XML tags as follows:
    <tool_call>
    {"arguments": <args-dict>, "name": <function-name>}
    </tool_call><|im_end|>
    {{.Input -}}
    <|im_start|>assistant
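The commit message above introduces a disable_parallel_new_lines option to drop the newlines between JSON objects when parallel tool calls are forced. This config does not use it, but a minimal sketch of how it might be combined with the commented-out parallel_calls setting is shown below; the exact key placement under function.grammar is an assumption based on the commit description, not something confirmed by this file.

    function:
      grammar:
        mixed_mode: true
        # Hypothetical example: force parallel tool calls and strip the
        # newlines between the generated JSON objects, as described in
        # the commit message (key placement is an assumption).
        parallel_calls: true
        disable_parallel_new_lines: true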