diff --git a/Dockerfile b/Dockerfile
index d730d4b4..60c30669 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -42,7 +42,7 @@ COPY . .
 RUN make build
 
 # Define the health check command
-HEALTHCHECK --interval=30s --timeout=360s --retries=10 \
+HEALTHCHECK --interval=1m --timeout=10m --retries=10 \
   CMD curl -f $HEALTHCHECK_ENDPOINT || exit 1
 
 EXPOSE 8080
diff --git a/Dockerfile.dev b/Dockerfile.dev
index df44359a..bc4a3377 100644
--- a/Dockerfile.dev
+++ b/Dockerfile.dev
@@ -72,7 +72,7 @@ RUN apt-get install -y libgomp1 libopencv-core4.5 libopencv-imgcodecs4.5
 COPY --from=builder /build/local-ai /usr/bin/local-ai
 
 # Define the health check command
-HEALTHCHECK --interval=30s --timeout=360s --retries=10 \
+HEALTHCHECK --interval=1m --timeout=10m --retries=10 \
   CMD curl -f $HEALTHCHECK_ENDPOINT || exit 1
 
 EXPOSE 8080
diff --git a/api/api.go b/api/api.go
index dd5f3022..fb3addca 100644
--- a/api/api.go
+++ b/api/api.go
@@ -11,7 +11,7 @@ import (
 	"github.com/rs/zerolog/log"
 )
 
-func App(opts ...AppOption) *fiber.App {
+func App(opts ...AppOption) (*fiber.App, error) {
 	options := newOptions(opts...)
 
 	zerolog.SetGlobalLevel(zerolog.InfoLevel)
@@ -71,13 +71,13 @@ func App(opts ...AppOption) *fiber.App {
 
 	if options.preloadJSONModels != "" {
 		if err := ApplyGalleryFromString(options.loader.ModelPath, options.preloadJSONModels, cm); err != nil {
-			return nil
+			return nil, err
 		}
 	}
 
 	if options.preloadModelsFromPath != "" {
 		if err := ApplyGalleryFromFile(options.loader.ModelPath, options.preloadModelsFromPath, cm); err != nil {
-			return nil
+			return nil, err
 		}
 	}
 
@@ -138,5 +138,5 @@ func App(opts ...AppOption) *fiber.App {
 	app.Get("/v1/models", listModels(options.loader, cm))
 	app.Get("/models", listModels(options.loader, cm))
 
-	return app
+	return app, nil
 }
diff --git a/main.go b/main.go
index f391affc..bdf95db7 100644
--- a/main.go
+++ b/main.go
@@ -111,7 +111,7 @@ It uses llama.cpp, ggml and gpt4all as backend with golang c bindings.
 		Copyright: "go-skynet authors",
 		Action: func(ctx *cli.Context) error {
 			fmt.Printf("Starting LocalAI using %d threads, with models path: %s\n", ctx.Int("threads"), ctx.String("models-path"))
-			return api.App(
+			app, err := api.App(
 				api.WithConfigFile(ctx.String("config-file")),
 				api.WithJSONStringPreload(ctx.String("preload-models")),
 				api.WithYAMLConfigPreload(ctx.String("preload-models-config")),
@@ -124,7 +124,12 @@ It uses llama.cpp, ggml and gpt4all as backend with golang c bindings.
 				api.WithCors(ctx.Bool("cors")),
 				api.WithCorsAllowOrigins(ctx.String("cors-allow-origins")),
 				api.WithThreads(ctx.Int("threads")),
-				api.WithUploadLimitMB(ctx.Int("upload-limit"))).Listen(ctx.String("address"))
+				api.WithUploadLimitMB(ctx.Int("upload-limit")))
+			if err != nil {
+				return err
+			}
+
+			return app.Listen(ctx.String("address"))
 		},
 	}
 
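
Since `App` now returns `(*fiber.App, error)` rather than a bare `*fiber.App` (which was previously `nil` when model preloading failed), any caller outside `main.go`, such as tests or programs embedding the API, must adopt the two-value form. Below is a minimal sketch of the new call pattern; the import path `github.com/go-skynet/LocalAI/api`, the `WithThreads(4)` value, and the `:8080` address are illustrative assumptions, not part of this diff.

```go
package main

import (
	"log"

	api "github.com/go-skynet/LocalAI/api" // assumed module path, not shown in this diff
)

func main() {
	// App now surfaces model-preload failures as an error instead of
	// silently returning a nil *fiber.App, so the error must be checked
	// before the app is used.
	app, err := api.App(
		api.WithThreads(4), // option taken from the diff; the others work the same way
	)
	if err != nil {
		log.Fatalf("failed to initialize the API: %v", err)
	}

	// Listen blocks, serving HTTP on the given address.
	if err := app.Listen(":8080"); err != nil {
		log.Fatal(err)
	}
}
```

The upside of this change is visible in `main.go` above: a preload failure now propagates as a CLI error at startup instead of causing a nil-pointer panic on the subsequent `Listen` call.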