Mirror of https://github.com/mudler/LocalAI.git, synced 2024-12-19 20:57:54 +00:00
fix(initializer): do select backends that exist (#2694)
We were not checking if the binary exists before picking these up from the asset dir.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
This commit is contained in: parent bd2f95c130 · commit e591ff2e74
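The core of the fix: when auto-detecting which llama.cpp variant to launch (AVX2, AVX, or the fallback build), stat the candidate binary first and select it only if it actually exists under the asset directory. Below is a minimal, self-contained sketch of that pattern; the directory layout, binary names, and `pickBackend` helper are illustrative assumptions, not LocalAI's exact API.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// pickBackend walks the candidates in priority order and returns the first
// one whose binary exists on disk, mirroring the "check before you select"
// guard this commit adds. Names and layout here are hypothetical.
func pickBackend(assetDir string, candidates ...string) (string, bool) {
	for _, name := range candidates {
		p := filepath.Join(assetDir, "backend-assets", "grpc", name)
		if _, err := os.Stat(p); err == nil {
			return p, true
		}
	}
	return "", false
}

func main() {
	p, ok := pickBackend("/opt/assets",
		"llama-cpp-avx2", "llama-cpp-avx", "llama-cpp-fallback")
	if !ok {
		fmt.Println("no llama.cpp backend binary found")
		return
	}
	fmt.Println("selected:", p)
}
```

The two Go hunks below apply the same guard inside `selectGRPCProcess` and `GreedyLoader`.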
```diff
@@ -282,6 +282,8 @@ COPY --from=grpc /opt/grpc /usr/local
 
 # Rebuild with defaults backends
 WORKDIR /build
+
+## Build the binary
 RUN make build
 
 RUN if [ ! -d "/build/sources/go-piper/piper-phonemize/pi/lib/" ]; then \
```
```diff
@@ -247,14 +247,23 @@ func selectGRPCProcess(backend, assetDir string, f16 bool) string {
 	}
 
 	if xsysinfo.HasCPUCaps(cpuid.AVX2) {
-		log.Info().Msgf("[%s] attempting to load with AVX2 variant", backend)
-		grpcProcess = backendPath(assetDir, LLamaCPPAVX2)
+		p := backendPath(assetDir, LLamaCPPAVX2)
+		if _, err := os.Stat(p); err == nil {
+			log.Info().Msgf("[%s] attempting to load with AVX2 variant", backend)
+			grpcProcess = p
+		}
 	} else if xsysinfo.HasCPUCaps(cpuid.AVX) {
-		log.Info().Msgf("[%s] attempting to load with AVX variant", backend)
-		grpcProcess = backendPath(assetDir, LLamaCPPAVX)
+		p := backendPath(assetDir, LLamaCPPAVX)
+		if _, err := os.Stat(p); err == nil {
+			log.Info().Msgf("[%s] attempting to load with AVX variant", backend)
+			grpcProcess = p
+		}
 	} else {
-		log.Info().Msgf("[%s] attempting to load with fallback variant", backend)
-		grpcProcess = backendPath(assetDir, LLamaCPPFallback)
+		p := backendPath(assetDir, LLamaCPPFallback)
+		if _, err := os.Stat(p); err == nil {
+			log.Info().Msgf("[%s] attempting to load with fallback variant", backend)
+			grpcProcess = p
+		}
 	}
 
 	return grpcProcess
```
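The three branches above repeat the same stat-then-assign guard. A small hypothetical helper, not part of this commit, could express it once; it reuses the surrounding `backendPath` function and zerolog `log` handle:

```go
// tryVariant is a hypothetical refactoring sketch: it returns the variant's
// path when its binary exists, and leaves the current selection untouched
// otherwise.
func tryVariant(current, assetDir, variant, backend string) string {
	p := backendPath(assetDir, variant)
	if _, err := os.Stat(p); err != nil {
		return current
	}
	log.Info().Msgf("[%s] attempting to load with %s", backend, variant)
	return p
}
```

With it, each branch would reduce to a one-liner such as `grpcProcess = tryVariant(grpcProcess, assetDir, LLamaCPPAVX2, backend)`.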
```diff
@@ -511,12 +520,24 @@ func (ml *ModelLoader) GreedyLoader(opts ...Option) (grpc.Backend, error) {
 		}
 
 		if autoDetect && key == LLamaCPP && err != nil {
-			backendToUse := LLamaCPPFallback
+			// try as hard as possible to run the llama.cpp variants
+			backendToUse := ""
 			if xsysinfo.HasCPUCaps(cpuid.AVX2) {
-				backendToUse = LLamaCPPAVX2
+				if _, err := os.Stat(backendPath(o.assetDir, LLamaCPPAVX2)); err == nil {
+					backendToUse = LLamaCPPAVX2
+				}
 			} else if xsysinfo.HasCPUCaps(cpuid.AVX) {
-				backendToUse = LLamaCPPAVX
+				if _, err := os.Stat(backendPath(o.assetDir, LLamaCPPAVX)); err == nil {
+					backendToUse = LLamaCPPAVX
+				}
+			} else {
+				if _, err := os.Stat(backendPath(o.assetDir, LLamaCPPFallback)); err == nil {
+					backendToUse = LLamaCPPFallback
+				} else {
+					// If we don't have a fallback binary, just skip this backend
+					continue
+				}
 			}
 
 			// Autodetection failed, try the fallback
 			log.Info().Msgf("[%s] Autodetection failed, trying the fallback", key)
```
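Both hunks gate their choice on `xsysinfo.HasCPUCaps`, which wraps CPU-feature detection from github.com/klauspost/cpuid, the package the `cpuid.AVX2` and `cpuid.AVX` constants come from. A standalone probe of the same capabilities, assuming the v2 module path:

```go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// Report the CPU capabilities the loader branches on when choosing
	// between the AVX2, AVX, and fallback llama.cpp builds.
	fmt.Println("AVX2 supported:", cpuid.CPU.Supports(cpuid.AVX2))
	fmt.Println("AVX supported: ", cpuid.CPU.Supports(cpuid.AVX))
}
```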