models(gallery): add mathstral-7b-v0.1-imat (#2901)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
commit bd8e2320c3
parent b5661d6302
@@ -233,6 +233,21 @@
     - filename: "Mistral-7B-Instruct-v0.3.Q4_K_M.gguf"
       sha256: "14850c84ff9f06e9b51d505d64815d5cc0cea0257380353ac0b3d21b21f6e024"
       uri: "huggingface://MaziyarPanahi/Mistral-7B-Instruct-v0.3-GGUF/Mistral-7B-Instruct-v0.3.Q4_K_M.gguf"
+- !!merge <<: *mistral03
+  name: "mathstral-7b-v0.1-imat"
+  url: "github:mudler/LocalAI/gallery/mathstral.yaml@master"
+  urls:
+    - https://huggingface.co/mistralai/mathstral-7B-v0.1
+    - https://huggingface.co/InferenceIllusionist/mathstral-7B-v0.1-iMat-GGUF
+  description: |
+    Mathstral 7B is a model specializing in mathematical and scientific tasks, based on Mistral 7B. You can read more in the official blog post https://mistral.ai/news/mathstral/.
+  overrides:
+    parameters:
+      model: mathstral-7B-v0.1-iMat-Q4_K_M.gguf
+  files:
+    - filename: mathstral-7B-v0.1-iMat-Q4_K_M.gguf
+      sha256: 3ba94b7a8283ffa319c9ce23657f91ecf221ceada167c1253906cf56d72e8f90
+      uri: huggingface://InferenceIllusionist/mathstral-7B-v0.1-iMat-GGUF/mathstral-7B-v0.1-iMat-Q4_K_M.gguf
 - &mudler
   ### START mudler's LocalAI specific-models
   url: "github:mudler/LocalAI/gallery/mudler.yaml@master"
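With this entry in the gallery index, the model can be pulled onto a running LocalAI instance through the model-gallery API. The Go sketch below is a minimal illustration, assuming a local instance on http://localhost:8080 and the default "localai" gallery name; the exact endpoint, id format and job-polling behaviour should be checked against the gallery documentation for the LocalAI version in use. Once the download completes, the model should be served under the name mathstral-7b-v0.1-imat through the usual OpenAI-compatible endpoints.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Ask the running LocalAI instance to install the new gallery entry.
	// "localai" is assumed to be the configured gallery name pointing at
	// this repository's gallery index; adjust if your galleries differ.
	body := []byte(`{"id": "localai@mathstral-7b-v0.1-imat"}`)
	resp, err := http.Post("http://localhost:8080/models/apply",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The apply endpoint works asynchronously: it normally answers with a
	// job uuid that can be polled (e.g. under /models/jobs/<uuid>) while
	// the GGUF file listed in the entry above is downloaded and checked
	// against its sha256.
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}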
gallery/mathstral.yaml (new file)
@@ -0,0 +1,67 @@
+---
+name: "mathstral"
+
+config_file: |
+  context_size: 8192
+  mmap: true
+  stopwords:
+    - "<|im_end|>"
+    - "<dummy32000>"
+    - "</tool_call>"
+    - "<|eot_id|>"
+    - "<|end_of_text|>"
+    - "</s>"
+    - "[/TOOL_CALLS]"
+    - "[/ACTIONS]"
+    - "[/INST]"
+    - "[INST]"
+
+  function:
+    # disable injecting the "answer" tool
+    disable_no_action: true
+
+    grammar:
+      # This allows the grammar to also return messages
+      #mixed_mode: true
+      # Not all models have a sketchpad or something to write thoughts on.
+      # This one will OR reply to strings OR JSON, but not both in the same reply
+      #no_mixed_free_string: true
+      # Disable grammar
+      # Base instructor model doesn't work well with grammars
+      disable: true
+      parallel_calls: true
+      disable_parallel_new_lines: true
+
+    return_name_in_function_response: true
+    # Without grammar uncomment the lines below
+    # Warning: this is relying only on the capability of the
+    # LLM model to generate the correct function call.
+    json_regex_match:
+      - "(?s)\\[TOOL\\_CALLS\\](.*)"
+    replace_function_results:
+      # Replace everything that is not JSON array or object
+      - key: '(?s)^[^{\[]*'
+        value: ""
+      - key: '(?s)[^}\]]*$'
+        value: ""
+      - key: "(?s)\\[TOOL\\_CALLS\\]"
+        value: ""
+      - key: "(?s)\\[\\/TOOL\\_CALLS\\]"
+        value: ""
+
+  template:
+    join_chat_messages_by_character: "" ## No newlines between messages
+    chat: |
+      {{.Input -}}
+    chat_message: |-
+      {{- if .FunctionCall -}}
+      [TOOL_CALLS] {{toJson .FunctionCall}} [/TOOL_CALLS]
+      {{- else if eq .RoleName "tool" -}}
+      [TOOL_RESULTS] {{.Content}} [/TOOL_RESULTS]
+      {{- else -}}
+      [INST] {{.Content }} [/INST]
+      {{ end -}}
+    completion: |
+      {{.Input}}
+    function: |-
+      [AVAILABLE_TOOLS] [{{range .Functions}}{"type": "function", "function": {"name": "{{.Name}}", "description": "{{.Description}}", "parameters": {{toJson .Parameters}} }}{{end}} ] [/AVAILABLE_TOOLS]{{.Input }}
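The chat_message template above is what turns LocalAI's internal message representation into Mistral/Mathstral-style prompt text: plain turns become [INST] ... [/INST] blocks, while function calling uses [TOOL_CALLS] and [TOOL_RESULTS]. As a rough illustration of how it behaves, the Go sketch below renders the same template with text/template; the chatMessage struct and the toJson helper are simplified stand-ins for what LocalAI's templating layer actually provides, not its real types.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"text/template"
)

// Simplified stand-in for the message data LocalAI hands to chat_message;
// the real templating structs in LocalAI carry more fields than this.
type chatMessage struct {
	RoleName     string
	Content      string
	FunctionCall map[string]any
}

// The chat_message template from gallery/mathstral.yaml, copied verbatim.
const chatMessageTmpl = `{{- if .FunctionCall -}}
[TOOL_CALLS] {{toJson .FunctionCall}} [/TOOL_CALLS]
{{- else if eq .RoleName "tool" -}}
[TOOL_RESULTS] {{.Content}} [/TOOL_RESULTS]
{{- else -}}
[INST] {{.Content }} [/INST]
{{ end -}}`

func main() {
	// toJson is provided by LocalAI's template engine; this is a minimal
	// re-implementation that is good enough for the demo.
	funcs := template.FuncMap{
		"toJson": func(v any) string {
			b, _ := json.Marshal(v)
			return string(b)
		},
	}
	tmpl := template.Must(template.New("chat_message").Funcs(funcs).Parse(chatMessageTmpl))

	msgs := []chatMessage{
		{RoleName: "user", Content: "What is 12*7?"},
		{RoleName: "assistant", FunctionCall: map[string]any{
			"name":      "calculator",
			"arguments": map[string]any{"expression": "12*7"},
		}},
		{RoleName: "tool", Content: "84"},
	}
	// Each message renders to one [INST], [TOOL_CALLS] or [TOOL_RESULTS]
	// block; join_chat_messages_by_character: "" means LocalAI would then
	// concatenate these blocks without any separator.
	for _, m := range msgs {
		if err := tmpl.Execute(os.Stdout, m); err != nil {
			panic(err)
		}
		fmt.Println()
	}
}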
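Because grammar-constrained decoding is disabled for this model (grammar.disable: true), the function block relies on json_regex_match and replace_function_results to dig the tool-call JSON out of free-form model output. The Go sketch below reproduces that clean-up step in isolation, using the same regular expressions as the config above; it is an illustration of the idea, not LocalAI's actual parsing code.

package main

import (
	"fmt"
	"regexp"
)

// json_regex_match: grab everything after the first [TOOL_CALLS] marker.
var toolCalls = regexp.MustCompile(`(?s)\[TOOL\_CALLS\](.*)`)

// replace_function_results: the same substitutions as in the config above,
// applied in order, each replacing its match with an empty string.
var cleanups = []*regexp.Regexp{
	regexp.MustCompile(`(?s)^[^{\[]*`),          // anything before the first { or [
	regexp.MustCompile(`(?s)[^}\]]*$`),          // anything after the last } or ]
	regexp.MustCompile(`(?s)\[TOOL\_CALLS\]`),   // stray opening tags
	regexp.MustCompile(`(?s)\[\/TOOL\_CALLS\]`), // stray closing tags
}

func extractToolCalls(raw string) string {
	out := raw
	if m := toolCalls.FindStringSubmatch(raw); m != nil {
		out = m[1]
	}
	for _, re := range cleanups {
		out = re.ReplaceAllString(out, "")
	}
	return out
}

func main() {
	raw := `Sure, let me calculate that. [TOOL_CALLS] [{"name": "calculator", "arguments": {"expression": "12*7"}}] [/TOOL_CALLS]`
	// Prints just the JSON array of tool calls; the surrounding chatter and
	// the [TOOL_CALLS]/[/TOOL_CALLS] markers are stripped away.
	fmt.Println(extractToolCalls(raw))
}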