@@ -48,25 +47,58 @@
[](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[](https://artifacthub.io/packages/search?repo=localai)
-**LocalAI** is the free, Open Source OpenAI alternative. LocalAI act as a drop-in replacement REST API thatβs compatible with OpenAI (Elevenlabs, Anthropic... ) API specifications for local AI inferencing. It allows you to run LLMs, generate images, audio (and not only) locally or on-prem with consumer grade hardware, supporting multiple model families. Does not require GPU. It is created and maintained by [Ettore Di Giacinto](https://github.com/mudler).
+**LocalAI** is the free, Open Source OpenAI alternative. LocalAI acts as a drop-in replacement REST API that's compatible with OpenAI (Elevenlabs, Anthropic... ) API specifications for local AI inferencing. It allows you to run LLMs, generate images, audio (and not only) locally or on-prem with consumer grade hardware, supporting multiple model families. Does not require GPU. It is created and maintained by [Ettore Di Giacinto](https://github.com/mudler).
+
+
+## ππ Local Stack Family
+
+π LocalAI is now part of a comprehensive suite of AI tools designed to work together:
+
+
A powerful Local AI agent management platform that serves as a drop-in replacement for OpenAI's Responses API, enhanced with advanced agentic capabilities.
diff --git a/docs/assets/images/imagen.png b/docs/assets/images/imagen.png
new file mode 100644
index 00000000..7d9808f4
Binary files /dev/null and b/docs/assets/images/imagen.png differ
diff --git a/docs/assets/images/localai_screenshot.png b/docs/assets/images/localai_screenshot.png
new file mode 100644
index 00000000..17774d1a
Binary files /dev/null and b/docs/assets/images/localai_screenshot.png differ
diff --git a/docs/assets/images/logos/logo.png b/docs/assets/images/logos/logo.png
new file mode 100644
index 00000000..de98e67b
Binary files /dev/null and b/docs/assets/images/logos/logo.png differ
diff --git a/docs/assets/images/logos/logo.svg b/docs/assets/images/logos/logo.svg
new file mode 100644
index 00000000..5e881d4b
--- /dev/null
+++ b/docs/assets/images/logos/logo.svg
@@ -0,0 +1,171 @@
+
+
diff --git a/docs/assets/images/screenshots/screenshot_chat.png b/docs/assets/images/screenshots/screenshot_chat.png
new file mode 100644
index 00000000..bc621ba7
Binary files /dev/null and b/docs/assets/images/screenshots/screenshot_chat.png differ
diff --git a/docs/assets/images/screenshots/screenshot_gallery.png b/docs/assets/images/screenshots/screenshot_gallery.png
new file mode 100644
index 00000000..c8a33642
Binary files /dev/null and b/docs/assets/images/screenshots/screenshot_gallery.png differ
diff --git a/docs/assets/images/screenshots/screenshot_home.png b/docs/assets/images/screenshots/screenshot_home.png
new file mode 100644
index 00000000..18777a46
Binary files /dev/null and b/docs/assets/images/screenshots/screenshot_home.png differ
diff --git a/docs/assets/images/screenshots/screenshot_image.png b/docs/assets/images/screenshots/screenshot_image.png
new file mode 100644
index 00000000..7d9808f4
Binary files /dev/null and b/docs/assets/images/screenshots/screenshot_image.png differ
diff --git a/docs/assets/images/screenshots/screenshot_login.png b/docs/assets/images/screenshots/screenshot_login.png
new file mode 100644
index 00000000..82b1614a
Binary files /dev/null and b/docs/assets/images/screenshots/screenshot_login.png differ
diff --git a/docs/assets/images/screenshots/screenshot_p2p.png b/docs/assets/images/screenshots/screenshot_p2p.png
new file mode 100644
index 00000000..fbeb75ef
Binary files /dev/null and b/docs/assets/images/screenshots/screenshot_p2p.png differ
diff --git a/docs/assets/images/screenshots/screenshot_talk.png b/docs/assets/images/screenshots/screenshot_talk.png
new file mode 100644
index 00000000..956b59d1
Binary files /dev/null and b/docs/assets/images/screenshots/screenshot_talk.png differ
diff --git a/docs/assets/images/screenshots/screenshot_tts.png b/docs/assets/images/screenshots/screenshot_tts.png
new file mode 100644
index 00000000..8df68f70
Binary files /dev/null and b/docs/assets/images/screenshots/screenshot_tts.png differ
diff --git a/docs/assets/jsconfig.json b/docs/assets/jsconfig.json
index 9f2d1c43..f3bd7ab2 100644
--- a/docs/assets/jsconfig.json
+++ b/docs/assets/jsconfig.json
@@ -3,7 +3,7 @@
"baseUrl": ".",
"paths": {
"*": [
- "../../../../.cache/hugo_cache/modules/filecache/modules/pkg/mod/github.com/gohugoio/hugo-mod-jslibs-dist/popperjs/v2@v2.21100.20000/package/dist/cjs/popper.js/*",
+ "../../../../.cache/hugo_cache/modules/filecache/modules/pkg/mod/github.com/gohugoio/hugo-mod-jslibs-dist/popperjs/v2@v2.21100.20000/package/dist/cjs/*",
"../../../../.cache/hugo_cache/modules/filecache/modules/pkg/mod/github.com/twbs/bootstrap@v5.3.2+incompatible/js/*"
]
}
diff --git a/docs/config.toml b/docs/config.toml
index 52602750..97e89ce6 100644
--- a/docs/config.toml
+++ b/docs/config.toml
@@ -48,9 +48,9 @@ defaultContentLanguage = 'en'
[params.docs] # Parameters for the /docs 'template'
- logo = "https://github.com/go-skynet/LocalAI/assets/2420543/0966aa2a-166e-4f99-a3e5-6c915fc997dd"
- logo_text = "LocalAI"
- title = "LocalAI documentation" # default html title for documentation pages/sections
+ logo = "https://raw.githubusercontent.com/mudler/LocalAI/refs/heads/master/core/http/static/logo.png"
+ logo_text = ""
+ title = "LocalAI" # default html title for documentation pages/sections
pathName = "docs" # path name for documentation site | default "docs"
@@ -108,6 +108,7 @@ defaultContentLanguage = 'en'
# indexName = "" # Index Name to perform search on (or set env variable HUGO_PARAM_DOCSEARCH_indexName)
[params.analytics] # Parameters for Analytics (Google, Plausible)
+ # google = "G-XXXXXXXXXX" # Replace with your Google Analytics ID
# plausibleURL = "/docs/s" # (or set via env variable HUGO_PARAM_ANALYTICS_plausibleURL)
# plausibleAPI = "/docs/s" # optional - (or set via env variable HUGO_PARAM_ANALYTICS_plausibleAPI)
# plausibleDomain = "" # (or set via env variable HUGO_PARAM_ANALYTICS_plausibleDomain)
diff --git a/docs/content/docs/features/distributed_inferencing.md b/docs/content/docs/features/distributed_inferencing.md
index 71d29f39..d599de87 100644
--- a/docs/content/docs/features/distributed_inferencing.md
+++ b/docs/content/docs/features/distributed_inferencing.md
@@ -13,6 +13,8 @@ LocalAI supports two modes of distributed inferencing via p2p:
- **Federated Mode**: Requests are shared between the cluster and routed to a single worker node in the network based on the load balancer's decision.
- **Worker Mode** (aka "model sharding" or "splitting weights"): Requests are processed by all the workers which contributes to the final inference result (by sharing the model weights).
+A list of global instances shared by the community is available at [explorer.localai.io](https://explorer.localai.io).
+
## Usage
Starting LocalAI with `--p2p` generates a shared token for connecting multiple instances: and that's all you need to create AI clusters, eliminating the need for intricate network setups.
diff --git a/docs/content/docs/getting-started/quickstart.md b/docs/content/docs/getting-started/quickstart.md
index 4e14c505..0d962d3c 100644
--- a/docs/content/docs/getting-started/quickstart.md
+++ b/docs/content/docs/getting-started/quickstart.md
@@ -18,14 +18,45 @@ To access the WebUI with an API_KEY, browser extensions such as [Requestly](http
{{% /alert %}}
-## Using the Bash Installer
+## Quickstart
-Install LocalAI easily using the bash installer with the following command:
-```sh
+### Using the Bash Installer
+```bash
curl https://localai.io/install.sh | sh
```
+### Run with Docker
+```bash
+# CPU only image:
+docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-cpu
+
+# Nvidia GPU:
+docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12
+
+# CPU and GPU image (bigger size):
+docker run -ti --name local-ai -p 8080:8080 localai/localai:latest
+
+# AIO images (it will pre-download a set of models ready for use, see https://localai.io/basics/container/)
+docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu
+```
+
+### Load models
+
+```bash
+# From the model gallery (see available models with `local-ai models list`, in the WebUI from the model tab, or visiting https://models.localai.io)
+local-ai run llama-3.2-1b-instruct:q4_k_m
+# Start LocalAI with the phi-2 model directly from huggingface
+local-ai run huggingface://TheBloke/phi-2-GGUF/phi-2.Q8_0.gguf
+# Install and run a model from the Ollama OCI registry
+local-ai run ollama://gemma:2b
+# Run a model from a configuration file
+local-ai run https://gist.githubusercontent.com/.../phi-2.yaml
+# Install and run a model from a standard OCI registry (e.g., Docker Hub)
+local-ai run oci://localai/phi-2:latest
+```
+
+
For a full list of options, refer to the [Installer Options]({{% relref "docs/advanced/installer" %}}) documentation.
Binaries can also be [manually downloaded]({{% relref "docs/reference/binaries" %}}).
diff --git a/docs/content/docs/overview.md b/docs/content/docs/overview.md
index 11b9ce7d..981ba765 100644
--- a/docs/content/docs/overview.md
+++ b/docs/content/docs/overview.md
@@ -1,4 +1,3 @@
-
+++
title = "Overview"
weight = 1
@@ -7,162 +6,96 @@ description = "What is LocalAI?"
tags = ["Beginners"]
categories = [""]
author = "Ettore Di Giacinto"
-# This allows to overwrite the landing page
-url = '/'
icon = "info"
+++
-
+LocalAI is your complete AI stack for running AI models locally. It's designed to be simple, efficient, and accessible, providing a drop-in replacement for OpenAI's API while keeping your data private and secure.
-
+- **Privacy First**: Your data never leaves your machine
+- **Complete Control**: Run models on your terms, with your hardware
+- **Open Source**: MIT licensed and community-driven
+- **Flexible Deployment**: From laptops to servers, with or without GPUs
+- **Extensible**: Add new models and features as needed
-> π‘ Get help - [βFAQ](https://localai.io/faq/) [πDiscussions](https://github.com/go-skynet/LocalAI/discussions) [πDiscord](https://discord.gg/uJAeKSAGDy)
->
-> [π» Quickstart](https://localai.io/basics/getting_started/) [πΌοΈ Models](https://models.localai.io/) [π Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [π₯½ Demo](https://demo.localai.io) [π Explorer](https://explorer.localai.io) [π« Examples](https://github.com/go-skynet/LocalAI/tree/master/examples/)
+## Core Components
+LocalAI is more than just a single tool - it's a complete ecosystem:
-**LocalAI** is the free, Open Source OpenAI alternative. LocalAI act as a drop-in replacement REST API that's compatible with OpenAI API specifications for local inferencing. It allows you to run LLMs, generate images, audio (and not only) locally or on-prem with consumer grade hardware, supporting multiple model families and architectures. Does not require GPU. It is created and maintained by [Ettore Di Giacinto](https://github.com/mudler).
+1. **[LocalAI Core](https://github.com/mudler/LocalAI)**
+ - OpenAI-compatible API
+ - Multiple model support (LLMs, image, audio)
+ - No GPU required
+ - Fast inference with native bindings
+   - [GitHub repository](https://github.com/mudler/LocalAI)
+2. **[LocalAGI](https://github.com/mudler/LocalAGI)**
+ - Autonomous AI agents
+ - No coding required
+ - WebUI and REST API support
+ - Extensible agent framework
+   - [GitHub repository](https://github.com/mudler/LocalAGI)
-## Start LocalAI
+3. **[LocalRecall](https://github.com/mudler/LocalRecall)**
+ - Semantic search
+ - Memory management
+ - Vector database
+ - Perfect for AI applications
+   - [GitHub repository](https://github.com/mudler/LocalRecall)
-Start the image with Docker to have a functional clone of OpenAI! π:
+## Getting Started
-```bash
-docker run -p 8080:8080 --name local-ai -ti localai/localai:latest-aio-cpu
-# Do you have a Nvidia GPUs? Use this instead
-# CUDA 11
-# docker run -p 8080:8080 --gpus all --name local-ai -ti localai/localai:latest-aio-gpu-nvidia-cuda-11
-# CUDA 12
-# docker run -p 8080:8080 --gpus all --name local-ai -ti localai/localai:latest-aio-gpu-nvidia-cuda-12
-```
-
-Or just use the bash installer:
+The fastest way to get started is with our one-line installer:
```bash
curl https://localai.io/install.sh | sh
```
-See the [π» Quickstart](https://localai.io/basics/getting_started/) for all the options and way you can run LocalAI!
+Or use Docker for a quick start:
-## What is LocalAI?
+```bash
+docker run -p 8080:8080 --name local-ai -ti localai/localai:latest-aio-cpu
+```
-In a nutshell:
+For more detailed installation options and configurations, see our [Getting Started guide](/basics/getting_started/).
-- Local, OpenAI drop-in alternative REST API. You own your data.
-- NO GPU required. NO Internet access is required either
- - Optional, GPU Acceleration is available. See also the [build section](https://localai.io/basics/build/index.html).
-- Supports multiple models
-- π Once loaded the first time, it keep models loaded in memory for faster inference
-- β‘ Doesn't shell-out, but uses bindings for a faster inference and better performance.
+## Key Features
-LocalAI is focused on making the AI accessible to anyone. Any contribution, feedback and PR is welcome!
+- **Text Generation**: Run various LLMs locally
+- **Image Generation**: Create images with stable diffusion
+- **Audio Processing**: Text-to-speech and speech-to-text
+- **Vision API**: Image understanding and analysis
+- **Embeddings**: Vector database support
+- **Functions**: OpenAI-compatible function calling
+- **P2P**: Distributed inference capabilities
-Note that this started just as a fun weekend project by [mudler](https://github.com/mudler) in order to try to create the necessary pieces for a full AI assistant like `ChatGPT`: the community is growing fast and we are working hard to make it better and more stable. If you want to help, please consider contributing (see below)!
+## Community and Support
-### π Features
+LocalAI is a community-driven project. You can:
-- π [Text generation with GPTs](https://localai.io/features/text-generation/) (`llama.cpp`, `gpt4all.cpp`, ... [:book: and more](https://localai.io/model-compatibility/index.html#model-compatibility-table))
-- π£ [Text to Audio](https://localai.io/features/text-to-audio/)
-- π [Audio to Text](https://localai.io/features/audio-to-text/) (Audio transcription with `whisper.cpp`)
-- π¨ [Image generation with stable diffusion](https://localai.io/features/image-generation)
-- π₯ [OpenAI functions](https://localai.io/features/openai-functions/) π
-- π§ [Embeddings generation for vector databases](https://localai.io/features/embeddings/)
-- βοΈ [Constrained grammars](https://localai.io/features/constrained_grammars/)
-- πΌοΈ [Download Models directly from Huggingface ](https://localai.io/models/)
-- π₯½ [Vision API](https://localai.io/features/gpt-vision/)
-- πΎ [Stores](https://localai.io/stores)
-- π [Reranker](https://localai.io/features/reranker/)
-- ππ§ [P2P Inferencing](https://localai.io/features/distribute/)
+- Join our [Discord community](https://discord.gg/uJAeKSAGDy)
+- Check out our [GitHub repository](https://github.com/mudler/LocalAI)
+- Contribute to the project
+- Share your use cases and examples
-## Contribute and help
+## Next Steps
-To help the project you can:
+Ready to dive in? Here are some recommended next steps:
-- If you have technological skills and want to contribute to development, have a look at the open issues. If you are new you can have a look at the [good-first-issue](https://github.com/go-skynet/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) and [help-wanted](https://github.com/go-skynet/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) labels.
+1. [Install LocalAI](/basics/getting_started/)
+2. [Explore available models](https://models.localai.io)
+3. [Model compatibility](/model-compatibility/)
+4. [Try out examples](https://github.com/mudler/LocalAI-examples)
+5. [Join the community](https://discord.gg/uJAeKSAGDy)
+6. [Check the LocalAI GitHub repository](https://github.com/mudler/LocalAI)
+7. [Check the LocalAGI GitHub repository](https://github.com/mudler/LocalAGI)
-- If you don't have technological skills you can still help improving documentation or [add examples](https://github.com/go-skynet/LocalAI/tree/master/examples) or share your user-stories with our community, any help and contribution is welcome!
-## π Star history
+## License
-[](https://star-history.com/#mudler/LocalAI&Date)
-
-## β€οΈ Sponsors
-
-> Do you find LocalAI useful?
-
-Support the project by becoming [a backer or sponsor](https://github.com/sponsors/mudler). Your logo will show up here with a link to your website.
-
-A huge thank you to our generous sponsors who support this project covering CI expenses, and our [Sponsor list](https://github.com/sponsors/mudler):
-
-