* feat: Add backend gallery

  This PR adds support for managing backends in much the same way as models: a backend gallery is now available and can be used to install and remove extra backends. The backend gallery is configured similarly to a model gallery (see the sketch below), and API calls allow installing and removing backends at runtime as well as during the startup phase of LocalAI.

* Add backends docs
* wip: Backend Dockerfile for python backends
* feat: drop extras images, build python backends separately
* fixup on all backends
* test CI
* Tweaks
* Drop old backends leftovers
* Fixup CI
* Move dockerfile upper
* Fix proto
* Feature dropped for consistency - we prefer model galleries
* Add missing packages in the build image
* exllama is only available on cublas
* pin torch on chatterbox
* Fixups to index
* CI
* Debug CI
* Install accelerator deps
* Add target arch
* Add cuda minor version
* Use self-hosted runners
* ci: use quay for test images
* fixups for vllm and chatterbox
* Small fixups on CI
* chatterbox is only available for nvidia
* Simplify CI builds
* Adapt test, use qwen3
* chore(model gallery): add jina-reranker-v1-tiny-en-gguf
* fix(gguf-parser): recover from potential panics that can happen while reading ggufs with gguf-parser
* Use reranker from llama.cpp in AIO images
* Limit concurrent jobs

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
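For reference, the gallery and backend-gallery definitions mentioned above share a single JSON shape, visible in the kong.Vars defaults of the main.go source further down. Below is a minimal, self-contained sketch of reading such a definition; the galleryEntry struct and the standalone program are illustrative assumptions, not types or APIs exported by LocalAI.

package main

import (
	"encoding/json"
	"fmt"
)

// galleryEntry mirrors the JSON shape of the "galleries" and "backends"
// defaults set via kong.Vars in main.go. The type is illustrative only;
// it is not part of LocalAI's public API.
type galleryEntry struct {
	Name string `json:"name"`
	URL  string `json:"url"`
}

func main() {
	// Default backend gallery value, copied verbatim from main.go below.
	raw := `[{"name":"localai", "url":"github:mudler/LocalAI/backend/index.yaml@master"}]`

	var backends []galleryEntry
	if err := json.Unmarshal([]byte(raw), &backends); err != nil {
		panic(err)
	}
	for _, b := range backends {
		fmt.Printf("backend gallery %q -> %s\n", b.Name, b.URL)
	}
}

The model gallery default ("galleries") uses the same two fields, pointing at gallery/index.yaml instead of backend/index.yaml.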
124 lines · 3.4 KiB · Go
package main

import (
	"embed"
	"os"
	"os/signal"
	"path/filepath"
	"syscall"

	"github.com/alecthomas/kong"
	"github.com/joho/godotenv"
	"github.com/mudler/LocalAI/core/cli"
	"github.com/mudler/LocalAI/internal"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"

	_ "github.com/mudler/LocalAI/swagger"
)

// Embedded backend assets shipped with the binary and handed to the CLI
// context in main(). The embed path assumes the repository's backend-assets
// directory.
//
//go:embed backend-assets/*
var backendAssets embed.FS

func main() {
	var err error

	// Initialize zerolog at INFO level; the desired level is set after the CLI options are parsed
	log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
	zerolog.SetGlobalLevel(zerolog.InfoLevel)

	// Catch signals from the OS requesting us to exit
	go func() {
		c := make(chan os.Signal, 1) // buffer size of 1 so the notifier is not blocked
		signal.Notify(c, os.Interrupt, syscall.SIGTERM)
		<-c
		os.Exit(1)
	}()

	// Handle loading environment variables from .env files
	envFiles := []string{".env", "localai.env"}
	homeDir, err := os.UserHomeDir()
	if err == nil {
		envFiles = append(envFiles, filepath.Join(homeDir, "localai.env"), filepath.Join(homeDir, ".config/localai.env"))
	}
	envFiles = append(envFiles, "/etc/localai.env")

	for _, envFile := range envFiles {
		if _, err := os.Stat(envFile); err == nil {
			log.Info().Str("envFile", envFile).Msg("env file found, loading environment variables from file")
			err = godotenv.Load(envFile)
			if err != nil {
				log.Error().Err(err).Str("envFile", envFile).Msg("failed to load environment variables from file")
				continue
			}
		}
	}

	// Actually parse the CLI options
	ctx := kong.Parse(&cli.CLI,
		kong.Description(
			` LocalAI is a drop-in replacement OpenAI API for running LLM, GPT and genAI models locally on CPUs and GPUs with consumer grade hardware.

Some of the compatible models are:
- Vicuna
- Koala
- GPT4ALL
- GPT4ALL-J
- Cerebras
- Alpaca
- StableLM (ggml quantized)

For a list of all available models for one-click install, check out: https://models.localai.io

Copyright: Ettore Di Giacinto

Version: ${version}
`,
		),
		kong.UsageOnError(),
		kong.Vars{
			"basepath":  kong.ExpandPath("."),
			"galleries": `[{"name":"localai", "url":"github:mudler/LocalAI/gallery/index.yaml@master"}]`,
			"backends":  `[{"name":"localai", "url":"github:mudler/LocalAI/backend/index.yaml@master"}]`,
			"version":   internal.PrintableVersion(),
		},
	)

	// Configure the logging level before we run the application
	// This is here to preserve the existing --debug flag functionality
	logLevel := "info"
	if cli.CLI.Debug && cli.CLI.LogLevel == nil {
		logLevel = "debug"
		zerolog.SetGlobalLevel(zerolog.DebugLevel)
		cli.CLI.LogLevel = &logLevel
	}

	if cli.CLI.LogLevel == nil {
		cli.CLI.LogLevel = &logLevel
	}

	switch *cli.CLI.LogLevel {
	case "error":
		zerolog.SetGlobalLevel(zerolog.ErrorLevel)
		log.Info().Msg("Setting logging to error")
	case "warn":
		zerolog.SetGlobalLevel(zerolog.WarnLevel)
		log.Info().Msg("Setting logging to warn")
	case "info":
		zerolog.SetGlobalLevel(zerolog.InfoLevel)
		log.Info().Msg("Setting logging to info")
	case "debug":
		zerolog.SetGlobalLevel(zerolog.DebugLevel)
		log.Debug().Msg("Setting logging to debug")
	case "trace":
		zerolog.SetGlobalLevel(zerolog.TraceLevel)
		log.Trace().Msg("Setting logging to trace")
	}

	// Populate the application with the embedded backend assets
	cli.CLI.Context.BackendAssets = backendAssets

	// Run the thing!
	err = ctx.Run(&cli.CLI.Context)
	if err != nil {
		log.Fatal().Err(err).Msg("Error running the application")
	}
}