mirror of https://github.com/mudler/LocalAI.git
255748bcba
This PR specifically introduces a `core` folder and moves the following packages over, without any other changes:

- `api/backend`
- `api/config`
- `api/options`
- `api/schema`

Once this is merged and we confirm there are no regressions, I can migrate the remaining changes over piece by piece to split up application startup, backend services, http, and mqtt, as was the goal of the earlier PRs!
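For a concrete sense of the migration, here is a sketch of the import rewrite this move implies for consumers of these packages (old paths inferred from the package list above; only the `api/` prefix becomes `core/`):

	// Import paths before this PR:
	import (
		"github.com/go-skynet/LocalAI/api/backend"
		config "github.com/go-skynet/LocalAI/api/config"
		"github.com/go-skynet/LocalAI/api/options"
		"github.com/go-skynet/LocalAI/api/schema"
	)

	// Import paths after this PR (as used in the file below):
	import (
		"github.com/go-skynet/LocalAI/core/backend"
		config "github.com/go-skynet/LocalAI/core/config"
		"github.com/go-skynet/LocalAI/core/options"
		"github.com/go-skynet/LocalAI/core/schema"
	)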
56 lines
1.4 KiB
Go
package openai

import (
	"github.com/go-skynet/LocalAI/core/backend"
	config "github.com/go-skynet/LocalAI/core/config"
	"github.com/go-skynet/LocalAI/core/options"
	"github.com/go-skynet/LocalAI/core/schema"

	model "github.com/go-skynet/LocalAI/pkg/model"
)
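// ComputeChoices runs model inference req.N times (defaulting to 1 when N
// is unset), accumulates prompt and completion token usage across the runs,
// and passes each finetuned response to cb so the caller can build up the
// returned []schema.Choice.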
func ComputeChoices(
	req *schema.OpenAIRequest,
	predInput string,
	config *config.Config,
	o *options.Option,
	loader *model.ModelLoader,
	cb func(string, *[]schema.Choice),
	tokenCallback func(string, backend.TokenUsage) bool) ([]schema.Choice, backend.TokenUsage, error) {
	n := req.N // number of completions to return
	result := []schema.Choice{}

	if n == 0 {
		n = 1
	}

	// collect any images attached to the request messages
	images := []string{}
	for _, m := range req.Messages {
		images = append(images, m.StringImages...)
	}

	// get the model function to call for the result
	predFunc, err := backend.ModelInference(req.Context, predInput, images, loader, *config, o, tokenCallback)
	if err != nil {
		return result, backend.TokenUsage{}, err
	}

	tokenUsage := backend.TokenUsage{}

	for i := 0; i < n; i++ {
		prediction, err := predFunc()
		if err != nil {
			return result, backend.TokenUsage{}, err
		}

		// accumulate token usage across all n completions
		tokenUsage.Prompt += prediction.Usage.Prompt
		tokenUsage.Completion += prediction.Usage.Completion

		// apply the configured response post-processing, then let the
		// caller append the choice via the callback
		finetunedResponse := backend.Finetune(*config, predInput, prediction.Response)
		cb(finetunedResponse, &result)

		//result = append(result, Choice{Text: prediction})
	}
	return result, tokenUsage, err
}
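For orientation, here is a minimal, hypothetical caller sketch (not part of this commit) showing how the cb callback is typically used to accumulate choices. It assumes schema.Choice exposes Text and Index fields (as the commented-out line above suggests) and that passing a nil tokenCallback selects non-streaming inference; req, prompt, cfg, opts, and loader are assumed to be in scope:

	// Hypothetical usage sketch; the nil-tokenCallback behavior is an assumption.
	choices, usage, err := ComputeChoices(req, prompt, cfg, opts, loader,
		func(s string, c *[]schema.Choice) {
			// append each finetuned completion as its own choice
			*c = append(*c, schema.Choice{Text: s, Index: len(*c)})
		},
		nil, // no per-token streaming callback
	)

Here choices would hold the req.N completions and usage the summed prompt and completion token counts, with err reporting any inference failure.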