Mirror of https://github.com/mudler/LocalAI.git
feat: add llama-stable backend (#932)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
parent cc060a283d
commit ab5b75eb01
Makefile (22 lines changed)
@@ -6,6 +6,8 @@ BINARY_NAME=local-ai
 # llama.cpp versions
 GOLLAMA_VERSION?=f03869d188b72c8a617bea3a36cf8eb43f73445c
 
+GOLLAMA_STABLE_VERSION?=50cee7712066d9e38306eccadcfbb44ea87df4b7
+
 # gpt4all version
 GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
 GPT4ALL_VERSION?=36f7fb584824961dc692c9f2354ee8f60c50587b
@@ -194,17 +196,23 @@ go-llama:
 	git clone --recurse-submodules https://github.com/go-skynet/go-llama.cpp go-llama
 	cd go-llama && git checkout -b build $(GOLLAMA_VERSION) && git submodule update --init --recursive --depth 1
 
+go-llama-stable:
+	git clone --recurse-submodules https://github.com/go-skynet/go-llama.cpp go-llama-stable
+	cd go-llama-stable && git checkout -b build $(GOLLAMA_STABLE_VERSION) && git submodule update --init --recursive --depth 1
+
 go-llama/libbinding.a: go-llama
 	$(MAKE) -C go-llama BUILD_TYPE=$(BUILD_TYPE) libbinding.a
 
+go-llama-stable/libbinding.a: go-llama-stable
+	$(MAKE) -C go-llama-stable BUILD_TYPE=$(BUILD_TYPE) libbinding.a
+
 go-piper/libpiper_binding.a:
 	$(MAKE) -C go-piper libpiper_binding.a example/main
 
-get-sources: go-llama go-ggllm go-ggml-transformers gpt4all go-piper go-rwkv whisper.cpp go-bert bloomz go-stable-diffusion
+get-sources: go-llama go-llama-stable go-ggllm go-ggml-transformers gpt4all go-piper go-rwkv whisper.cpp go-bert bloomz go-stable-diffusion
 	touch $@
 
 replace:
 	$(GOCMD) mod edit -replace github.com/go-skynet/go-llama.cpp=$(shell pwd)/go-llama
 	$(GOCMD) mod edit -replace github.com/nomic-ai/gpt4all/gpt4all-bindings/golang=$(shell pwd)/gpt4all/gpt4all-bindings/golang
 	$(GOCMD) mod edit -replace github.com/go-skynet/go-ggml-transformers.cpp=$(shell pwd)/go-ggml-transformers
 	$(GOCMD) mod edit -replace github.com/donomii/go-rwkv.cpp=$(shell pwd)/go-rwkv
@@ -222,6 +230,7 @@ prepare-sources: get-sources replace
 rebuild: ## Rebuilds the project
 	$(GOCMD) clean -cache
 	$(MAKE) -C go-llama clean
+	$(MAKE) -C go-llama-stable clean
 	$(MAKE) -C gpt4all/gpt4all-bindings/golang/ clean
 	$(MAKE) -C go-ggml-transformers clean
 	$(MAKE) -C go-rwkv clean
@@ -240,7 +249,8 @@ clean: ## Remove build related file
 	$(GOCMD) clean -cache
 	rm -f prepare
 	rm -rf ./go-llama
 	rm -rf ./gpt4all
+	rm -rf ./go-llama-stable
 	rm -rf ./go-gpt2
 	rm -rf ./go-stable-diffusion
 	rm -rf ./go-ggml-transformers
@@ -353,6 +363,7 @@ backend-assets/grpc/falcon: backend-assets/grpc go-ggllm/libggllm.a
 	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/falcon ./cmd/grpc/falcon/
 
 backend-assets/grpc/llama: backend-assets/grpc go-llama/libbinding.a
+	$(GOCMD) mod edit -replace github.com/go-skynet/go-llama.cpp=$(shell pwd)/go-llama
 	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-llama LIBRARY_PATH=$(shell pwd)/go-llama \
 	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/llama ./cmd/grpc/llama/
 # TODO: every binary should have its own folder instead, so can have different metal implementations
@@ -360,6 +371,11 @@ ifeq ($(BUILD_TYPE),metal)
 	cp go-llama/build/bin/ggml-metal.metal backend-assets/grpc/
 endif
 
+backend-assets/grpc/llama-stable: backend-assets/grpc go-llama-stable/libbinding.a
+	$(GOCMD) mod edit -replace github.com/go-skynet/go-llama.cpp=$(shell pwd)/go-llama-stable
+	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-llama-stable LIBRARY_PATH=$(shell pwd)/go-llama \
+	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/llama-stable ./cmd/grpc/llama-stable/
+
 backend-assets/grpc/gpt4all: backend-assets/grpc backend-assets/gpt4all gpt4all/gpt4all-bindings/golang/libgpt4all.a
 	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/gpt4all/gpt4all-bindings/golang/ LIBRARY_PATH=$(shell pwd)/gpt4all/gpt4all-bindings/golang/ \
 	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/gpt4all ./cmd/grpc/gpt4all/
cmd/grpc/llama-stable/main.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package main

import (
	"flag"

	llama "github.com/go-skynet/LocalAI/pkg/backend/llm/llama-stable"

	grpc "github.com/go-skynet/LocalAI/pkg/grpc"
)

var (
	addr = flag.String("addr", "localhost:50051", "the address to connect to")
)

func main() {
	flag.Parse()

	if err := grpc.StartServer(*addr, &llama.LLM{}); err != nil {
		panic(err)
	}
}
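The wrapper handed to grpc.StartServer has to expose the backend methods defined in the llama-stable package (shown in full in the next file). The sketch below is illustrative only: the backend interface it declares is a local stand-in written for this example, not the interface actually used by pkg/grpc, and it simply asserts at compile time that the wrapper provides the expected surface.

package main

import (
	llama "github.com/go-skynet/LocalAI/pkg/backend/llm/llama-stable"
	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
)

// backend is a local stand-in listing the methods the llama-stable wrapper
// implements (see pkg/backend/llm/llama-stable/llama.go below); it is not the
// real interface declared in pkg/grpc.
type backend interface {
	Load(*pb.ModelOptions) error
	Predict(*pb.PredictOptions) (string, error)
	PredictStream(*pb.PredictOptions, chan string) error
	Embeddings(*pb.PredictOptions) ([]float32, error)
}

// Compile-time check that the wrapper provides the methods above.
var _ backend = &llama.LLM{}

func main() {}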
pkg/backend/llm/llama-stable/llama.go (new file, 204 lines)
@@ -0,0 +1,204 @@
package llama

// This is a wrapper to satisfy the GRPC service interface
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
import (
	"fmt"

	"github.com/go-skynet/LocalAI/pkg/grpc/base"
	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
	"github.com/go-skynet/go-llama.cpp"
)

type LLM struct {
	base.SingleThread

	llama *llama.LLama
}

func (llm *LLM) Load(opts *pb.ModelOptions) error {
	ropeFreqBase := float32(10000)
	ropeFreqScale := float32(1)

	if opts.RopeFreqBase != 0 {
		ropeFreqBase = opts.RopeFreqBase
	}
	if opts.RopeFreqScale != 0 {
		ropeFreqScale = opts.RopeFreqScale
	}

	llamaOpts := []llama.ModelOption{
		llama.WithRopeFreqBase(ropeFreqBase),
		llama.WithRopeFreqScale(ropeFreqScale),
	}

	if opts.NGQA != 0 {
		llamaOpts = append(llamaOpts, llama.WithGQA(int(opts.NGQA)))
	}

	if opts.RMSNormEps != 0 {
		llamaOpts = append(llamaOpts, llama.WithRMSNormEPS(opts.RMSNormEps))
	}

	if opts.ContextSize != 0 {
		llamaOpts = append(llamaOpts, llama.SetContext(int(opts.ContextSize)))
	}
	if opts.F16Memory {
		llamaOpts = append(llamaOpts, llama.EnableF16Memory)
	}
	if opts.Embeddings {
		llamaOpts = append(llamaOpts, llama.EnableEmbeddings)
	}
	if opts.NGPULayers != 0 {
		llamaOpts = append(llamaOpts, llama.SetGPULayers(int(opts.NGPULayers)))
	}

	llamaOpts = append(llamaOpts, llama.SetMMap(opts.MMap))
	llamaOpts = append(llamaOpts, llama.SetMainGPU(opts.MainGPU))
	llamaOpts = append(llamaOpts, llama.SetTensorSplit(opts.TensorSplit))
	if opts.NBatch != 0 {
		llamaOpts = append(llamaOpts, llama.SetNBatch(int(opts.NBatch)))
	} else {
		llamaOpts = append(llamaOpts, llama.SetNBatch(512))
	}

	if opts.NUMA {
		llamaOpts = append(llamaOpts, llama.EnableNUMA)
	}

	if opts.LowVRAM {
		llamaOpts = append(llamaOpts, llama.EnabelLowVRAM)
	}

	model, err := llama.New(opts.ModelFile, llamaOpts...)
	llm.llama = model

	return err
}

func buildPredictOptions(opts *pb.PredictOptions) []llama.PredictOption {
	ropeFreqBase := float32(10000)
	ropeFreqScale := float32(1)

	if opts.RopeFreqBase != 0 {
		ropeFreqBase = opts.RopeFreqBase
	}
	if opts.RopeFreqScale != 0 {
		ropeFreqScale = opts.RopeFreqScale
	}
	predictOptions := []llama.PredictOption{
		llama.SetTemperature(opts.Temperature),
		llama.SetTopP(opts.TopP),
		llama.SetTopK(int(opts.TopK)),
		llama.SetTokens(int(opts.Tokens)),
		llama.SetThreads(int(opts.Threads)),
		llama.WithGrammar(opts.Grammar),
		llama.SetRopeFreqBase(ropeFreqBase),
		llama.SetRopeFreqScale(ropeFreqScale),
		llama.SetNegativePromptScale(opts.NegativePromptScale),
		llama.SetNegativePrompt(opts.NegativePrompt),
	}

	if opts.PromptCacheAll {
		predictOptions = append(predictOptions, llama.EnablePromptCacheAll)
	}

	if opts.PromptCacheRO {
		predictOptions = append(predictOptions, llama.EnablePromptCacheRO)
	}

	// Expected absolute path
	if opts.PromptCachePath != "" {
		predictOptions = append(predictOptions, llama.SetPathPromptCache(opts.PromptCachePath))
	}

	if opts.Mirostat != 0 {
		predictOptions = append(predictOptions, llama.SetMirostat(int(opts.Mirostat)))
	}

	if opts.MirostatETA != 0 {
		predictOptions = append(predictOptions, llama.SetMirostatETA(opts.MirostatETA))
	}

	if opts.MirostatTAU != 0 {
		predictOptions = append(predictOptions, llama.SetMirostatTAU(opts.MirostatTAU))
	}

	if opts.Debug {
		predictOptions = append(predictOptions, llama.Debug)
	}

	predictOptions = append(predictOptions, llama.SetStopWords(opts.StopPrompts...))

	if opts.PresencePenalty != 0 {
		predictOptions = append(predictOptions, llama.SetPenalty(opts.PresencePenalty))
	}

	if opts.NKeep != 0 {
		predictOptions = append(predictOptions, llama.SetNKeep(int(opts.NKeep)))
	}

	if opts.Batch != 0 {
		predictOptions = append(predictOptions, llama.SetBatch(int(opts.Batch)))
	}

	if opts.F16KV {
		predictOptions = append(predictOptions, llama.EnableF16KV)
	}

	if opts.IgnoreEOS {
		predictOptions = append(predictOptions, llama.IgnoreEOS)
	}

	if opts.Seed != 0 {
		predictOptions = append(predictOptions, llama.SetSeed(int(opts.Seed)))
	}

	//predictOptions = append(predictOptions, llama.SetLogitBias(c.Seed))

	predictOptions = append(predictOptions, llama.SetFrequencyPenalty(opts.FrequencyPenalty))
	predictOptions = append(predictOptions, llama.SetMlock(opts.MLock))
	predictOptions = append(predictOptions, llama.SetMemoryMap(opts.MMap))
	predictOptions = append(predictOptions, llama.SetPredictionMainGPU(opts.MainGPU))
	predictOptions = append(predictOptions, llama.SetPredictionTensorSplit(opts.TensorSplit))
	predictOptions = append(predictOptions, llama.SetTailFreeSamplingZ(opts.TailFreeSamplingZ))
	predictOptions = append(predictOptions, llama.SetTypicalP(opts.TypicalP))
	return predictOptions
}

func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
	return llm.llama.Predict(opts.Prompt, buildPredictOptions(opts)...)
}

func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {
	predictOptions := buildPredictOptions(opts)

	predictOptions = append(predictOptions, llama.SetTokenCallback(func(token string) bool {
		results <- token
		return true
	}))

	go func() {
		_, err := llm.llama.Predict(opts.Prompt, predictOptions...)
		if err != nil {
			fmt.Println("err: ", err)
		}
		close(results)
	}()

	return nil
}

func (llm *LLM) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
	predictOptions := buildPredictOptions(opts)

	if len(opts.EmbeddingTokens) > 0 {
		tokens := []int{}
		for _, t := range opts.EmbeddingTokens {
			tokens = append(tokens, int(t))
		}
		return llm.llama.TokenEmbeddings(tokens, predictOptions...)
	}

	return llm.llama.Embeddings(opts.Embeddings, predictOptions...)
}
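A minimal sketch (not part of the commit) of driving this wrapper directly from Go. The model path and sampling values are placeholders chosen for the example; the proto field names are the ones read by Load and buildPredictOptions above. PredictStream returns immediately and closes the results channel once generation finishes, so ranging over the channel is enough to consume the stream.

package main

import (
	"fmt"

	llama "github.com/go-skynet/LocalAI/pkg/backend/llm/llama-stable"
	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
)

func main() {
	llm := &llama.LLM{}

	// ModelFile is a placeholder path; ContextSize is an example value, not a
	// LocalAI default.
	if err := llm.Load(&pb.ModelOptions{
		ModelFile:   "./models/ggml-model.bin",
		ContextSize: 512,
	}); err != nil {
		panic(err)
	}

	// Blocking, single-shot prediction.
	out, err := llm.Predict(&pb.PredictOptions{
		Prompt:      "What is LocalAI?",
		Tokens:      64,
		Threads:     4,
		Temperature: 0.7,
		TopP:        0.9,
		TopK:        40,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out)

	// Streaming: tokens arrive on the channel as they are generated.
	results := make(chan string)
	if err := llm.PredictStream(&pb.PredictOptions{Prompt: "Tell me a joke", Tokens: 64, Threads: 4}, results); err != nil {
		panic(err)
	}
	for token := range results {
		fmt.Print(token)
	}
}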
Backend registration (third changed file; its path is not shown in this capture):

@@ -16,6 +16,7 @@ import (
 const (
 	LlamaBackend       = "llama"
+	LlamaStableBackend = "llama-stable"
 	BloomzBackend      = "bloomz"
 	StarcoderBackend   = "starcoder"
 	GPTJBackend        = "gptj"
@@ -41,6 +42,7 @@ const (
 
 var AutoLoadBackends []string = []string{
 	LlamaBackend,
+	LlamaStableBackend,
 	Gpt4All,
 	FalconBackend,
 	GPTNeoXBackend,
@@ -173,7 +175,7 @@ func (ml *ModelLoader) BackendLoader(opts ...Option) (model *grpc.Client, err er
 	}
 
 	switch backend {
-	case LlamaBackend, GPTJBackend, DollyBackend,
+	case LlamaBackend, LlamaStableBackend, GPTJBackend, DollyBackend,
 		MPTBackend, Gpt2Backend, FalconBackend,
 		GPTNeoXBackend, ReplitBackend, StarcoderBackend, BloomzBackend,
 		RwkvBackend, LCHuggingFaceBackend, BertEmbeddingsBackend, FalconGGMLBackend, StableDiffusionBackend, WhisperBackend:
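With LlamaStableBackend added to AutoLoadBackends, the stable llama.cpp build becomes the second backend the loader tries when no backend is configured explicitly. The sketch below is a self-contained illustration of that ordering only: tryLoad is a hypothetical stand-in for the real loader (which spawns the backend-assets/grpc binaries over gRPC), and the backend names after "llama-stable" are abbreviated for the example.

package main

import (
	"errors"
	"fmt"
)

// Illustrative ordering taken from AutoLoadBackends above (truncated).
var autoLoadBackends = []string{"llama", "llama-stable", "gpt4all"}

// tryLoad is a hypothetical stand-in for asking a backend process to load the
// model; here it pretends only the stable llama.cpp build succeeds.
func tryLoad(backend, model string) error {
	if backend == "llama-stable" {
		return nil
	}
	return errors.New("load failed")
}

func main() {
	model := "ggml-model.bin"
	for _, b := range autoLoadBackends {
		if err := tryLoad(b, model); err == nil {
			fmt.Println("loaded with backend:", b)
			return
		}
	}
	fmt.Println("no backend could load", model)
}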