package backend

import (
	"context"
	"os"
	"regexp"
	"strings"
	"sync"

	config "github.com/go-skynet/LocalAI/api/config"
	"github.com/go-skynet/LocalAI/api/options"
	"github.com/go-skynet/LocalAI/pkg/gallery"
	"github.com/go-skynet/LocalAI/pkg/grpc"
	model "github.com/go-skynet/LocalAI/pkg/model"
	"github.com/go-skynet/LocalAI/pkg/utils"
)

// LLMResponse bundles the generated text with its token accounting.
type LLMResponse struct {
	Response string // should this be []byte?
	Usage    TokenUsage
}

// TokenUsage counts the tokens consumed by the prompt and the completion.
type TokenUsage struct {
	Prompt     int
	Completion int
}

// ModelInference prepares a backend for the configured model and returns a
// function that performs the actual prediction when invoked. If tokenCallback
// is non-nil, generated tokens are streamed to it as they are produced.
func ModelInference(ctx context.Context, s string, loader *model.ModelLoader, c config.Config, o *options.Option, tokenCallback func(string, TokenUsage) bool) (func() (LLMResponse, error), error) {
	modelFile := c.Model

	grpcOpts := gRPCModelOpts(c)

	var inferenceModel *grpc.Client
	var err error

	opts := []model.Option{
		model.WithLoadGRPCLoadModelOpts(grpcOpts),
		model.WithThreads(uint32(c.Threads)), // some models use this to allocate threads during startup
		model.WithAssetDir(o.AssetsDestination),
		model.WithModel(modelFile),
		model.WithContext(o.Context),
	}

	if c.GRPC.Attempts != 0 {
		opts = append(opts, model.WithGRPCAttempts(c.GRPC.Attempts))
	}

	if c.GRPC.AttemptsSleepTime != 0 {
		opts = append(opts, model.WithGRPCAttemptsDelay(c.GRPC.AttemptsSleepTime))
	}

	for k, v := range o.ExternalGRPCBackends {
		opts = append(opts, model.WithExternalBackend(k, v))
	}

	if c.Backend != "" {
		opts = append(opts, model.WithBackendString(c.Backend))
	}

	// Check if the model file exists; if it doesn't, try to install it from the gallery
	if o.AutoloadGalleries { // experimental
		if _, err := os.Stat(modelFile); os.IsNotExist(err) {
			utils.ResetDownloadTimers()
			// the model is missing on disk, so try to download it
			err := gallery.InstallModelFromGalleryByName(o.Galleries, modelFile, loader.ModelPath, gallery.GalleryModel{}, utils.DisplayDownloadFunction)
			if err != nil {
				return nil, err
			}
		}
	}

	if c.Backend == "" {
		inferenceModel, err = loader.GreedyLoader(opts...)
	} else {
		inferenceModel, err = loader.BackendLoader(opts...)
	}

	if err != nil {
		return nil, err
	}

	// Over gRPC, the backend is expected to answer with a single response when streaming is not supported
	fn := func() (LLMResponse, error) {
		opts := gRPCPredictOpts(c, loader.ModelPath)
		opts.Prompt = s

		tokenUsage := TokenUsage{}

		// Check the per-model feature flag for usage, since tokenCallback may have a cost. Defaults to on.
		if !c.FeatureFlag["usage"] {
			userTokenCallback := tokenCallback
			if userTokenCallback == nil {
				userTokenCallback = func(token string, usage TokenUsage) bool {
					return true
				}
			}

			promptInfo, pErr := inferenceModel.TokenizeString(ctx, opts)
			if pErr == nil && promptInfo.Length > 0 {
				tokenUsage.Prompt = int(promptInfo.Length)
			}

			tokenCallback = func(token string, usage TokenUsage) bool {
				tokenUsage.Completion++
				return userTokenCallback(token, tokenUsage)
			}
		}

		if tokenCallback != nil {
			ss := ""
			err := inferenceModel.PredictStream(ctx, opts, func(s []byte) {
				tokenCallback(string(s), tokenUsage)
				ss += string(s)
			})
			return LLMResponse{
				Response: ss,
				Usage:    tokenUsage,
			}, err
		} else {
			// TODO: Is the chicken bit the only way to get here? is that acceptable?
			reply, err := inferenceModel.Predict(ctx, opts)
			if err != nil {
				return LLMResponse{}, err
			}
			return LLMResponse{
				Response: string(reply.Message),
				Usage:    tokenUsage,
			}, err
		}
	}

	return fn, nil
}
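
// exampleStreamInference is a hypothetical usage sketch, not part of the
// original API surface: it shows how a caller might drive ModelInference with
// a streaming token callback, assuming the loader, config, and options have
// already been wired up by the API layer. All names and values here are
// illustrative only.
func exampleStreamInference(ctx context.Context, loader *model.ModelLoader, cfg config.Config, o *options.Option) (LLMResponse, error) {
	infer, err := ModelInference(ctx, "Once upon a time", loader, cfg, o,
		func(token string, usage TokenUsage) bool {
			// Receive each token as it is produced, together with the
			// running usage counters.
			return true
		})
	if err != nil {
		return LLMResponse{}, err
	}
	// The returned closure performs the (possibly streaming) prediction.
	return infer()
}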

// cutstrings caches compiled cut-string regexps so each pattern is compiled
// only once across requests; mu guards the map.
var cutstrings = make(map[string]*regexp.Regexp)
var mu sync.Mutex

// Finetune post-processes a raw prediction according to the model config:
// it optionally echoes the input, removes any configured cut-strings, and
// trims the configured prefixes.
func Finetune(config config.Config, input, prediction string) string {
	if config.Echo {
		prediction = input + prediction
	}

	for _, c := range config.Cutstrings {
		mu.Lock()
		reg, ok := cutstrings[c]
		if !ok {
			reg = regexp.MustCompile(c)
			cutstrings[c] = reg
		}
		mu.Unlock()
		prediction = reg.ReplaceAllString(prediction, "")
	}

	for _, c := range config.TrimSpace {
		prediction = strings.TrimSpace(strings.TrimPrefix(prediction, c))
	}
	return prediction
}
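
// exampleFinetune is a hypothetical usage sketch, not part of the original
// file: with a cut-string of "</s>", Finetune strips the stop token from a
// raw prediction before it is handed back to the client. The literal values
// are illustrative, and setting Cutstrings directly assumes the field is
// writable exactly as it is read above.
func exampleFinetune() string {
	cfg := config.Config{}
	cfg.Cutstrings = []string{`</s>`}
	return Finetune(cfg, "prompt", "hello world</s>") // yields "hello world"
}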