diff --git a/gallery/index.yaml b/gallery/index.yaml
index c71e7425..24b4d65f 100644
--- a/gallery/index.yaml
+++ b/gallery/index.yaml
@@ -853,7 +853,7 @@
       sha256: bac8e8c1d1d9d53cbdb148b8ff9ad378ddb392429207099e85b5aae3a43bff3d
       uri: huggingface://cstr/salamandra-7b-instruct-GGUF/salamandra-7b-instruct.Q4_K_M-f32.gguf
 - &llama32
-  url: "github:mudler/LocalAI/gallery/llama3.2-quantized.yaml@master" ## llama3.2
+  url: "github:mudler/LocalAI/gallery/llama3.2-quantized.yaml@master"
   icon: https://avatars.githubusercontent.com/u/153379578
   license: llama3.2
   description: |
@@ -1383,6 +1383,21 @@
     - filename: FineMath-Llama-3B-Q4_K_M.gguf
       sha256: 16c73b5cf2a417a7e1608bcc9469f1461fc3e759ce04a3a337f48df977dc158c
       uri: huggingface://bartowski/FineMath-Llama-3B-GGUF/FineMath-Llama-3B-Q4_K_M.gguf
+- !!merge <<: *llama32
+  name: "LocalAI-functioncall-llama3.2-1b-v0.4"
+  url: "github:mudler/LocalAI/gallery/llama3.2-fcall.yaml@master"
+  urls:
+    - https://huggingface.co/mudler/LocalAI-functioncall-llama3.2-1b-v0.4
+    - https://huggingface.co/mradermacher/LocalAI-functioncall-llama3.2-1b-v0.4-GGUF
+  description: |
+    A model tailored to be conversational and execute function calls with LocalAI. This model is based on llama 3.2 and has 1B parameter. Perfect for small devices.
+  overrides:
+    parameters:
+      model: LocalAI-functioncall-llama3.2-1b-v0.4.Q8_0.gguf
+  files:
+    - filename: LocalAI-functioncall-llama3.2-1b-v0.4.Q8_0.gguf
+      sha256: 547e57c2d3f17c632c9fd303afdb00446e7396df453aee62633b76976c407616
+      uri: huggingface://mradermacher/LocalAI-functioncall-llama3.2-1b-v0.4-GGUF/LocalAI-functioncall-llama3.2-1b-v0.4.Q8_0.gguf
 - &qwen25
   name: "qwen2.5-14b-instruct" ## Qwen2.5
   icon: https://avatars.githubusercontent.com/u/141221163
diff --git a/gallery/llama3.2-fcall.yaml b/gallery/llama3.2-fcall.yaml
new file mode 100644
index 00000000..0188045e
--- /dev/null
+++ b/gallery/llama3.2-fcall.yaml
@@ -0,0 +1,48 @@
+---
+name: "llama3.2-fcall"
+
+config_file: |
+  mmap: true
+  function:
+    json_regex_match:
+      - "(?s)<Output>(.*?)</Output>"
+    capture_llm_results:
+      - (?s)<Thought>(.*?)</Thought>
+    replace_llm_results:
+      - key: (?s)<Thought>(.*?)</Thought>
+        value: ""
+    grammar:
+      properties_order: "name,arguments"
+  template:
+    chat: |
+      <|begin_of_text|><|start_header_id|>system<|end_header_id|>
+      You are a helpful assistant<|eot_id|><|start_header_id|>user<|end_header_id|>
+      {{.Input }}
+      <|start_header_id|>assistant<|end_header_id|>
+    chat_message: |
+      <|start_header_id|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "tool"}}tool{{else if eq .RoleName "user"}}user{{end}}<|end_header_id|>
+      {{ if .FunctionCall -}}
+      {{ else if eq .RoleName "tool" -}}
+      {{ end -}}
+      {{ if .Content -}}
+      {{.Content -}}
+      {{ else if .FunctionCall -}}
+      {{ toJson .FunctionCall -}}
+      {{ end -}}
+      <|eot_id|>
+    completion: |
+      {{.Input}}
+    function: |
+      <|start_header_id|>system<|end_header_id|>
+      You are an AI assistant that executes function calls, and these are the tools at your disposal:
+      {{range .Functions}}
+      {'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
+      {{end}}
+      <|eot_id|>{{.Input}}<|start_header_id|>assistant<|end_header_id|>
+  context_size: 8192
+  f16: true
+  stopwords:
+    - <|im_end|>
+    - <dummy32000>
+    - "<|eot_id|>"
+    - <|end_of_text|>