chore(model gallery): remove dead icons and update LLAVA and DeepSeek ones (#4645)

* chore(model gallery): update icons and add LLAVA ones

Signed-off-by: Gianluca Boiano <morf3089@gmail.com>

* chore(model gallery): fix all complaints related to yamllint

Signed-off-by: Gianluca Boiano <morf3089@gmail.com>

---------

Signed-off-by: Gianluca Boiano <morf3089@gmail.com>
Gianluca Boiano 2025-01-20 16:13:19 +01:00 committed by GitHub
parent aeb1dca52e
commit a396040886
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

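Why dropping the per-entry `icon:` lines is safe: gallery entries are plain YAML mappings that reuse base entries through anchors and merge keys, so an entry declared with `!!merge <<: *qwen25` inherits every field of the `&qwen25` anchor (including its icon) unless it sets its own. Below is a minimal sketch of that pattern using field values visible in this diff; the structure is simplified and the exact indentation of the real index.yaml may differ.

```yaml
# Base entry: defines the &qwen25 anchor with a shared icon.
- &qwen25
  name: "qwen2.5-14b-instruct"
  icon: https://avatars.githubusercontent.com/u/141221163
  url: "github:mudler/LocalAI/gallery/chatml.yaml@master"
# Derived entry: the merge key pulls in the anchor's fields; with no local
# icon key it falls back to the anchor's icon, so the dead per-entry
# imgur links removed in this commit are simply no longer needed.
- !!merge <<: *qwen25
  name: "evathene-v1.0"
  urls:
    - https://huggingface.co/sophosympatheia/Evathene-v1.0
```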

@@ -819,7 +819,7 @@
- filename: salamandra-7b-instruct.Q4_K_M-f32.gguf
sha256: bac8e8c1d1d9d53cbdb148b8ff9ad378ddb392429207099e85b5aae3a43bff3d
uri: huggingface://cstr/salamandra-7b-instruct-GGUF/salamandra-7b-instruct.Q4_K_M-f32.gguf
- &llama32 ## llama3.2
url: "github:mudler/LocalAI/gallery/llama3.1-instruct.yaml@master"
icon: https://avatars.githubusercontent.com/u/153379578
license: llama3.2
@@ -1354,7 +1354,7 @@
- filename: FineMath-Llama-3B-Q4_K_M.gguf
sha256: 16c73b5cf2a417a7e1608bcc9469f1461fc3e759ce04a3a337f48df977dc158c
uri: huggingface://bartowski/FineMath-Llama-3B-GGUF/FineMath-Llama-3B-Q4_K_M.gguf
- &qwen25 ## Qwen2.5
name: "qwen2.5-14b-instruct"
icon: https://avatars.githubusercontent.com/u/141221163
url: "github:mudler/LocalAI/gallery/chatml.yaml@master"
@@ -2181,7 +2181,6 @@
sha256: 42cf7a96784dc8f25c61c2404620c3e6548a024caa8dff6e435d7c86400d7ab8
uri: huggingface://mradermacher/Qwen2.5-7B-nerd-uncensored-v1.7-GGUF/Qwen2.5-7B-nerd-uncensored-v1.7.Q4_K_M.gguf
- !!merge <<: *qwen25
-icon: https://i.imgur.com/OxX2Usi.png
name: "evathene-v1.0"
urls:
- https://huggingface.co/sophosympatheia/Evathene-v1.0
@@ -2540,7 +2539,6 @@
sha256: 91907f29746625a62885793475956220b81d8a5a34b53686a1acd1d03fd403ea
uri: huggingface://bartowski/72B-Qwen2.5-Kunou-v1-GGUF/72B-Qwen2.5-Kunou-v1-Q4_K_M.gguf
- !!merge <<: *qwen25
-icon: https://i.imgur.com/OxX2Usi.png
name: "evathene-v1.3"
urls:
- https://huggingface.co/sophosympatheia/Evathene-v1.3
@@ -3276,7 +3274,7 @@
- filename: DRT-o1-14B-Q4_K_M.gguf
sha256: 9619ca984cf4ce8e4f69bcde831de17b2ce05dd89536e3130608877521e3d328
uri: huggingface://bartowski/DRT-o1-14B-GGUF/DRT-o1-14B-Q4_K_M.gguf
- &smollm ## SmolLM
url: "github:mudler/LocalAI/gallery/chatml.yaml@master"
name: "smollm-1.7b-instruct"
icon: https://huggingface.co/datasets/HuggingFaceTB/images/resolve/main/banner_smol.png
@@ -3334,7 +3332,7 @@
- filename: Vikhr-Qwen-2.5-1.5B-Instruct.Q4_K_M.gguf
sha256: eaeac314e30b461413bc1cc819cdc0cd6a79265711fd0b8268702960a082c7bd
uri: huggingface://QuantFactory/Vikhr-Qwen-2.5-1.5B-Instruct-GGUF/Vikhr-Qwen-2.5-1.5B-Instruct.Q4_K_M.gguf
- &llama31 ## LLama3.1
url: "github:mudler/LocalAI/gallery/llama3.1-instruct.yaml@master"
icon: https://avatars.githubusercontent.com/u/153379578
name: "meta-llama-3.1-8b-instruct"
@@ -4485,7 +4483,6 @@
sha256: 27b10c3ca4507e8bf7d305d60e5313b54ef5fffdb43a03f36223d19d906e39f3
uri: huggingface://mradermacher/L3.1-70Blivion-v0.1-rc1-70B-i1-GGUF/L3.1-70Blivion-v0.1-rc1-70B.i1-Q4_K_M.gguf
- !!merge <<: *llama31
-icon: https://i.imgur.com/sdN0Aqg.jpeg
name: "llama-3.1-hawkish-8b"
urls:
- https://huggingface.co/mukaj/Llama-3.1-Hawkish-8B
@@ -5222,10 +5219,10 @@
- filename: Dolphin3.0-Llama3.1-8B-Q4_K_M.gguf
sha256: 268390e07edd407ad93ea21a868b7ae995b5950e01cad0db9e1802ae5049d405
uri: huggingface://bartowski/Dolphin3.0-Llama3.1-8B-GGUF/Dolphin3.0-Llama3.1-8B-Q4_K_M.gguf
- &deepseek ## Deepseek
url: "github:mudler/LocalAI/gallery/deepseek.yaml@master"
name: "deepseek-coder-v2-lite-instruct"
-icon: "https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/logo.svg?raw=true"
+icon: "https://avatars.githubusercontent.com/u/148330874"
license: deepseek
description: |
DeepSeek-Coder-V2, an open-source Mixture-of-Experts (MoE) code language model that achieves performance comparable to GPT4-Turbo in code-specific tasks. Specifically, DeepSeek-Coder-V2 is further pre-trained from DeepSeek-Coder-V2-Base with 6 trillion tokens sourced from a high-quality and multi-source corpus. Through this continued pre-training, DeepSeek-Coder-V2 substantially enhances the coding and mathematical reasoning capabilities of DeepSeek-Coder-V2-Base, while maintaining comparable performance in general language tasks. Compared to DeepSeek-Coder, DeepSeek-Coder-V2 demonstrates significant advancements in various aspects of code-related tasks, as well as reasoning and general capabilities. Additionally, DeepSeek-Coder-V2 expands its support for programming languages from 86 to 338, while extending the context length from 16K to 128K.
@@ -5287,7 +5284,7 @@
- filename: archangel_sft_pythia2-8b.Q4_K_M.gguf
sha256: a47782c55ef2b39b19644213720a599d9849511a73c9ebb0c1de749383c0a0f8
uri: huggingface://RichardErkhov/ContextualAI_-_archangel_sft_pythia2-8b-gguf/archangel_sft_pythia2-8b.Q4_K_M.gguf
- &qwen2 ## Start QWEN2
url: "github:mudler/LocalAI/gallery/chatml.yaml@master"
name: "qwen2-7b-instruct"
icon: https://avatars.githubusercontent.com/u/141221163
@@ -5647,7 +5644,7 @@
- filename: minicpm-v-2_6-mmproj-f16.gguf
sha256: f8a805e9e62085805c69c427287acefc284932eb4abfe6e1b1ce431d27e2f4e0
uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
- &mistral03 ## START Mistral
url: "github:mudler/LocalAI/gallery/mistral-0.3.yaml@master"
name: "mistral-7b-instruct-v0.3"
icon: https://cdn-avatars.huggingface.co/v1/production/uploads/62dac1c7a8ead43d20e3e17a/wrLf5yaGC6ng4XME70w6Z.png
@@ -6155,7 +6152,6 @@
- !!merge <<: *mistral03
name: "mn-12b-mag-mell-r1-iq-arm-imatrix"
url: "github:mudler/LocalAI/gallery/chatml.yaml@master"
-icon: "https://i.imgur.com/wjyAaTO.png"
urls:
- https://huggingface.co/inflatebot/MN-12B-Mag-Mell-R1
- https://huggingface.co/Lewdiculous/MN-12B-Mag-Mell-R1-GGUF-IQ-ARM-Imatrix
@@ -6280,7 +6276,7 @@
- filename: Wayfarer-12B-Q4_K_M.gguf
sha256: 6cd9f290c820c64854fcdcfd312b066447acc2f63abe2e2e71af9bc4f1946c08
uri: huggingface://bartowski/Wayfarer-12B-GGUF/Wayfarer-12B-Q4_K_M.gguf
- &mudler ### START mudler's LocalAI specific-models
url: "github:mudler/LocalAI/gallery/mudler.yaml@master"
name: "LocalAI-llama3-8b-function-call-v0.2"
icon: "https://cdn-uploads.huggingface.co/production/uploads/647374aa7ff32a81ac6d35d4/us5JKi9z046p8K-cn_M0w.webp"
@@ -6325,7 +6321,7 @@
- filename: Mirai-Nova-Llama3-LocalAI-8B-v0.1-q4_k_m.bin
sha256: 579cbb229f9c11d0330759ff4733102d2491615a4c61289e26c09d1b3a583fec
uri: huggingface://mudler/Mirai-Nova-Llama3-LocalAI-8B-v0.1-GGUF/Mirai-Nova-Llama3-LocalAI-8B-v0.1-q4_k_m.bin
- &parler-tts ### START parler-tts
url: "github:mudler/LocalAI/gallery/parler-tts.yaml@master"
name: parler-tts-mini-v0.1
overrides:
@@ -6342,7 +6338,7 @@
- cpu
- text-to-speech
- python
- &rerankers ### START rerankers
url: "github:mudler/LocalAI/gallery/rerankers.yaml@master"
name: cross-encoder
parameters:
@@ -7265,10 +7261,9 @@
name: "l3-8b-stheno-v3.1"
urls:
- https://huggingface.co/Sao10K/L3-8B-Stheno-v3.1
-icon: https://w.forfun.com/fetch/cb/cba2205390e517bea1ea60ca0b491af4.jpeg
description: |
- A model made for 1-on-1 Roleplay ideally, but one that is able to handle scenarios, RPGs and storywriting fine.
- Uncensored during actual roleplay scenarios. # I do not care for zero-shot prompting like what some people do. It is uncensored enough in actual usecases.
- I quite like the prose and style for this model.
overrides:
parameters:
@@ -8059,7 +8054,6 @@
urls:
- https://huggingface.co/bartowski/New-Dawn-Llama-3-70B-32K-v1.0-GGUF
- https://huggingface.co/sophosympatheia/New-Dawn-Llama-3-70B-32K-v1.0
-icon: https://imgur.com/tKzncGo.png
description: |
This model is a multi-level SLERP merge of several Llama 3 70B variants. See the merge recipe below for details. I extended the context window for this model out to 32K by snagging some layers from abacusai/Smaug-Llama-3-70B-Instruct-32K using a technique similar to what I used for Midnight Miqu, which was further honed by jukofyork.
This model is uncensored. You are responsible for whatever you do with it.
@@ -8411,7 +8405,8 @@
- filename: dolphin-2.9.2-Phi-3-Medium-abliterated-Q4_K_M.gguf
sha256: 566331c2efe87725310aacb709ca15088a0063fa0ddc14a345bf20d69982156b
uri: huggingface://bartowski/dolphin-2.9.2-Phi-3-Medium-abliterated-GGUF/dolphin-2.9.2-Phi-3-Medium-abliterated-Q4_K_M.gguf
-- url: "github:mudler/LocalAI/gallery/chatml.yaml@master"
+- !!merge <<: *llama3
+url: "github:mudler/LocalAI/gallery/chatml.yaml@master"
name: "llama-3-8b-instruct-dpo-v0.3-32k"
license: llama3
urls:
@@ -8595,7 +8590,7 @@
- filename: Copus-2x8B.i1-Q4_K_M.gguf
sha256: 685da1ba49e203e8f491105585143d76044286d4b4687bed37d325f6b55501e5
uri: huggingface://mradermacher/Copus-2x8B-i1-GGUF/Copus-2x8B.i1-Q4_K_M.gguf
- &yi-chat ### Start Yi
url: "github:mudler/LocalAI/gallery/chatml.yaml@master"
icon: "https://github.com/01-ai/Yi/raw/main/assets/img/Yi_logo_icon_light.svg"
name: "yi-1.5-9b-chat"
@@ -8806,7 +8801,7 @@
- filename: Fimbulvetr-11B-v2-Q4_K_M-imat.gguf
sha256: 3f309b59508342536a70edd6c4be6cf4f2cb97f2e32cbc79ad2ab3f4c02933a4
uri: huggingface://Lewdiculous/Fimbulvetr-11B-v2-GGUF-IQ-Imatrix/Fimbulvetr-11B-v2-Q4_K_M-imat.gguf
- &noromaid ### Start noromaid
url: "github:mudler/LocalAI/gallery/noromaid.yaml@master"
name: "noromaid-13b-0.4-DPO"
icon: https://cdn-uploads.huggingface.co/production/uploads/630dfb008df86f1e5becadc3/VKX2Z2yjZX5J8kXzgeCYO.png
@@ -8826,7 +8821,7 @@
- filename: Noromaid-13B-0.4-DPO.q4_k_m.gguf
sha256: cb28e878d034fae3d0b43326c5fc1cfb4ab583b17c56e41d6ce023caec03c1c1
uri: huggingface://NeverSleep/Noromaid-13B-0.4-DPO-GGUF/Noromaid-13B-0.4-DPO.q4_k_m.gguf
- &wizardlm2 ### START Vicuna based
url: "github:mudler/LocalAI/gallery/wizardlm2.yaml@master"
name: "wizardlm2-7b"
description: |
@@ -8881,7 +8876,9 @@
- filename: moondream2-mmproj-f16.gguf
sha256: 4cc1cb3660d87ff56432ebeb7884ad35d67c48c7b9f6b2856f305e39c38eed8f
uri: huggingface://moondream/moondream2-gguf/moondream2-mmproj-f16.gguf
- &llava ### START LLaVa
+name: "llava-1.6-vicuna"
+icon: https://github.com/lobehub/lobe-icons/raw/master/packages/static-png/dark/llava-color.png
url: "github:mudler/LocalAI/gallery/llava.yaml@master"
license: apache-2.0
description: |
@@ -8895,7 +8892,6 @@
- gpu
- llama2
- cpu
-name: "llava-1.6-vicuna"
overrides:
mmproj: mmproj-vicuna7b-f16.gguf
parameters:
@@ -9363,7 +9359,6 @@
June 18, 2024 Update, After extensive testing of the intermediate checkpoints, significant progress has been made.
The model is slowly — I mean, really slowly — unlearning its alignment. By significantly lowering the learning rate, I was able to visibly observe deep behavioral changes, this process is taking longer than anticipated, but it's going to be worth it. Estimated time to completion: 4 more days.. I'm pleased to report that in several tests, the model not only maintained its intelligence but actually showed a slight improvement, especially in terms of common sense. An intermediate checkpoint of this model was used to create invisietch/EtherealRainbow-v0.3-rc7, with promising results. Currently, it seems like I'm on the right track. I hope this model will serve as a solid foundation for further merges, whether for role-playing (RP) or for uncensoring. This approach also allows us to save on actual fine-tuning, thereby reducing our carbon footprint. The merge process takes just a few minutes of CPU time, instead of days of GPU work.
June 20, 2024 Update, Unaligning was partially successful, and the results are decent, but I am not fully satisfied. I decided to bite the bullet, and do a full finetune, god have mercy on my GPUs. I am also releasing the intermediate checkpoint of this model.
-icon: https://i.imgur.com/Kpk1PgZ.png
overrides:
parameters:
model: LLAMA-3_8B_Unaligned_Alpha-Q4_K_M.gguf
@@ -9389,7 +9384,6 @@
uri: huggingface://bartowski/L3-8B-Lunaris-v1-GGUF/L3-8B-Lunaris-v1-Q4_K_M.gguf
- !!merge <<: *llama3
name: "llama-3_8b_unaligned_alpha_rp_soup-i1"
-icon: https://i.imgur.com/pXcjpoV.png
urls:
- https://huggingface.co/SicariusSicariiStuff/LLAMA-3_8B_Unaligned_Alpha_RP_Soup
- https://huggingface.co/mradermacher/LLAMA-3_8B_Unaligned_Alpha_RP_Soup-i1-GGUF
@@ -9739,7 +9733,7 @@
- filename: Freyja-v4.95-maldv-7b-NON-FICTION.i1-Q4_K_M.gguf
sha256: cdc0f4de6df2ba120835fbd25c2a0ae2af8548f46d2c40c7a018c51c3d19e0c0
uri: huggingface://mradermacher/Freyja-v4.95-maldv-7b-NON-FICTION-i1-GGUF/Freyja-v4.95-maldv-7b-NON-FICTION.i1-Q4_K_M.gguf
- &chatml ### ChatML
url: "github:mudler/LocalAI/gallery/chatml.yaml@master"
name: "una-thepitbull-21.4b-v2"
license: afl-3.0
@@ -9787,7 +9781,6 @@
sha256: 9c90f3a65332a03a6cbb563eee19c7586d9544f646ff9f33f7f1904b3d415ae2
uri: huggingface://nold/HelpingAI-9B-GGUF/HelpingAI-9B_Q4_K_M.gguf
- url: "github:mudler/LocalAI/gallery/chatml-hercules.yaml@master"
-icon: "https://tse3.mm.bing.net/th/id/OIG1.vnrl3xpEcypR3McLW63q?pid=ImgGn"
urls:
- https://huggingface.co/Locutusque/Llama-3-Hercules-5.0-8B
- https://huggingface.co/bartowski/Llama-3-Hercules-5.0-8B-GGUF
@@ -10025,7 +10018,7 @@
- filename: Triangulum-10B.Q4_K_M.gguf
sha256: dd071f99edf6b166044bf229cdeec19419c4c348e3fc3d6587cfcc55e6fb85fa
uri: huggingface://mradermacher/Triangulum-10B-GGUF/Triangulum-10B.Q4_K_M.gguf
- &command-R ### START Command-r
url: "github:mudler/LocalAI/gallery/command-r.yaml@master"
name: "command-r-v01:q1_s"
license: "cc-by-nc-4.0"
@@ -10080,7 +10073,7 @@
- filename: "aya-23-35B-Q4_K_M.gguf"
sha256: "57824768c1a945e21e028c8e9a29b39adb4838d489f5865c82601ab9ad98065d"
uri: "huggingface://bartowski/aya-23-35B-GGUF/aya-23-35B-Q4_K_M.gguf"
- &phi-2-chat ### START Phi-2
url: "github:mudler/LocalAI/gallery/phi-2-chat.yaml@master"
license: mit
description: |
@@ -10202,7 +10195,7 @@
- filename: internlm3-8b-instruct-Q4_K_M.gguf
uri: huggingface://bartowski/internlm3-8b-instruct-GGUF/internlm3-8b-instruct-Q4_K_M.gguf
sha256: 2a9644687318e8659c9cf9b40730d5cc2f5af06f786a50439c7c51359b23896e
- &phi-3 ### START Phi-3
url: "github:mudler/LocalAI/gallery/phi-3-chat.yaml@master"
name: "phi-3-mini-4k-instruct"
icon: https://avatars.githubusercontent.com/u/6154722
@@ -10402,7 +10395,7 @@
- filename: Phi-3.5-MoE-instruct-Q4_K_M.gguf
sha256: 43e91bb720869bd8a92d8eb86bc3c74a52c49cf61642ca709b3d7bb89644df36
uri: huggingface://bartowski/Phi-3.5-MoE-instruct-GGUF/Phi-3.5-MoE-instruct-Q4_K_M.gguf
- &hermes-2-pro-mistral ### START Hermes
url: "github:mudler/LocalAI/gallery/hermes-2-pro-mistral.yaml@master"
name: "hermes-2-pro-mistral"
icon: https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ggO2sBDJ8Bhc6w-zwTx5j.png
@@ -10738,7 +10731,7 @@
- filename: "galatolo-Q4_K.gguf"
sha256: "ca0cfd5a9ad40dc16416aa3a277015d0299b62c0803b67f5709580042202c172"
uri: "huggingface://galatolo/cerbero-7b-gguf/ggml-model-Q4_K.gguf"
- &codellama ### START Codellama
url: "github:mudler/LocalAI/gallery/codellama.yaml@master"
name: "codellama-7b"
license: llama2
@@ -10869,7 +10862,7 @@
- filename: "llm-compiler-7b-ftd.Q4_K.gguf"
uri: "huggingface://legraphista/llm-compiler-7b-ftd-IMat-GGUF/llm-compiler-7b-ftd.Q4_K.gguf"
sha256: d862dd18ed335413787d0ad196522a9902a3c10a6456afdab8721822cb0ddde8
- &openvino ### START OpenVINO
url: "github:mudler/LocalAI/gallery/openvino.yaml@master"
name: "openvino-llama-3-8b-instruct-ov-int8"
license: llama3
@@ -10983,7 +10976,7 @@
- gpu
- embedding
- cpu
- &sentencentransformers ### START Embeddings
description: |
This framework provides an easy method to compute dense vector representations for sentences, paragraphs, and images. The models are based on transformer networks like BERT / RoBERTa / XLM-RoBERTa etc. and achieve state-of-the-art performance in various tasks. Text is embedded in vector space such that similar text are closer and can efficiently be found using cosine similarity.
urls:
@@ -10998,7 +10991,7 @@
overrides:
parameters:
model: all-MiniLM-L6-v2
- &dreamshaper ### START Image generation
name: dreamshaper
icon: https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/dd9b038c-bd15-43ab-86ab-66e145ad7ff2/width=450/26072158-132340247-8k%20portrait%20of%20beautiful%20cyborg%20with%20brown%20hair,%20intricate,%20elegant,%20highly%20detailed,%20majestic,%20digital%20photography,%20art%20by%20artg_ed.jpeg
license: other
@@ -11110,7 +11103,7 @@
- filename: t5xxl_fp16.safetensors
sha256: 6e480b09fae049a72d2a8c5fbccb8d3e92febeb233bbe9dfe7256958a9167635
uri: https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp16.safetensors
- &whisper ## Whisper
url: "github:mudler/LocalAI/gallery/whisper-base.yaml@master"
name: "whisper-1"
license: "MIT"
@@ -11290,7 +11283,7 @@
description: |
Stable Diffusion in NCNN with c++, supported txt2img and img2img
name: stablediffusion-cpp
- &piper ## Piper TTS
url: github:mudler/LocalAI/gallery/piper.yaml@master
name: voice-en-us-kathleen-low
icon: https://github.com/rhasspy/piper/raw/master/etc/logo.png