package openai

import (
	"bufio"
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/gofiber/fiber/v2"
	"github.com/google/uuid"
	"github.com/rs/zerolog/log"
	"github.com/valyala/fasthttp"

	"github.com/mudler/LocalAI/core/backend"
	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/schema"
	"github.com/mudler/LocalAI/pkg/functions"
	model "github.com/mudler/LocalAI/pkg/model"
)

// CompletionEndpoint is the OpenAI Completion API endpoint https://platform.openai.com/docs/api-reference/completions
// @Summary Generate completions for a given prompt and model.
// @Param request body schema.OpenAIRequest true "query params"
// @Success 200 {object} schema.OpenAIResponse "Response"
// @Router /v1/completions [post]
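//
// A minimal illustrative request (the host, port, and model name here are
// assumptions that depend on your deployment):
//
//	curl http://localhost:8080/v1/completions \
//	  -H "Content-Type: application/json" \
//	  -d '{"model": "gpt-4", "prompt": "A long time ago", "temperature": 0.7}'
//
// With "stream": true the endpoint instead answers with server-sent events
// ("data: <json>" lines) terminated by a "data: [DONE]" sentinel.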
func CompletionEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) func(c *fiber.Ctx) error {
	id := uuid.New().String()
	created := int(time.Now().Unix())
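	// Note: id and created are generated once, when the endpoint is
	// registered, so every response served by this handler shares them.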
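
	// process runs inference for the streaming path: it emits one
	// OpenAIResponse per generated token on the responses channel and closes
	// the channel once generation finishes.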
	process := func(s string, req *schema.OpenAIRequest, config *config.BackendConfig, loader *model.ModelLoader, responses chan schema.OpenAIResponse) {
		ComputeChoices(req, s, config, appConfig, loader, func(s string, c *[]schema.Choice) {}, func(s string, usage backend.TokenUsage) bool {
			resp := schema.OpenAIResponse{
				ID:      id,
				Created: created,
				Model:   req.Model, // we have to return what the user sent here, due to OpenAI spec.
				Choices: []schema.Choice{
					{
						Index: 0,
						Text:  s,
					},
				},
				Object: "text_completion",
				Usage: schema.OpenAIUsage{
					PromptTokens:     usage.Prompt,
					CompletionTokens: usage.Completion,
					TotalTokens:      usage.Prompt + usage.Completion,
				},
			}
			log.Debug().Msgf("Sending goroutine: %s", s)

			responses <- resp
			return true
		})
		close(responses)
	}

	return func(c *fiber.Ctx) error {
		// Attach a correlation ID so the request can be traced across logs.
		c.Set("X-Correlation-ID", id)

		modelFile, input, err := readRequest(c, cl, ml, appConfig, true)
		if err != nil {
			return fmt.Errorf("failed reading parameters from request: %w", err)
		}

		log.Debug().Msgf("`input`: %+v", input)

		config, input, err := mergeRequestWithConfig(modelFile, input, cl, ml, appConfig.Debug, appConfig.Threads, appConfig.ContextSize, appConfig.F16)
		if err != nil {
			return fmt.Errorf("failed merging request with model config: %w", err)
		}
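
		// OpenAI-style JSON mode: when the client sends
		// {"response_format": {"type": "json_object"}}, constrain generation
		// with a JSON grammar so the model emits valid JSON.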
		if config.ResponseFormatMap != nil {
			d := schema.ChatCompletionResponseFormat{}
			dat, _ := json.Marshal(config.ResponseFormatMap)
			_ = json.Unmarshal(dat, &d)
			if d.Type == "json_object" {
				input.Grammar = functions.JSONBNF
			}
		}

		config.Grammar = input.Grammar

		log.Debug().Msgf("Parameter Config: %+v", config)
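
		// Streaming responses are delivered as Server-Sent Events, so the SSE
		// headers must be set before any body bytes are written.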
		if input.Stream {
			log.Debug().Msg("Stream request received")
			c.Context().SetContentType("text/event-stream")
			c.Set("Cache-Control", "no-cache")
			c.Set("Connection", "keep-alive")
			c.Set("Transfer-Encoding", "chunked")
		}

		templateFile := ""

		// A model can have a "file.bin.tmpl" file associated with a prompt template prefix
		if ml.ExistsInModelPath(fmt.Sprintf("%s.tmpl", config.Model)) {
			templateFile = config.Model
		}
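
		// An explicit completion template from the model config takes
		// precedence over the convention-based "<model>.tmpl" lookup above.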
		if config.TemplateConfig.Completion != "" {
			templateFile = config.TemplateConfig.Completion
		}

		if input.Stream {
			if len(config.PromptStrings) > 1 {
				return errors.New("cannot handle more than 1 `PromptStrings` when Streaming")
			}

			predInput := config.PromptStrings[0]

			if templateFile != "" {
				templatedInput, err := ml.EvaluateTemplateForPrompt(model.CompletionPromptTemplate, templateFile, model.PromptTemplateData{
					Input:        predInput,
					SystemPrompt: config.SystemPrompt,
				})
				if err == nil {
					predInput = templatedInput
					log.Debug().Msgf("Template found, input modified to: %s", predInput)
				}
			}

			responses := make(chan schema.OpenAIResponse)

			go process(predInput, input, config, ml, responses)
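
			// Stream each chunk as a server-sent event. json.Encoder.Encode
			// appends a trailing newline, so together with the "\n" in the
			// Fprintf below each event ends with the blank line SSE requires.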
			c.Context().SetBodyStreamWriter(fasthttp.StreamWriter(func(w *bufio.Writer) {
				for ev := range responses {
					var buf bytes.Buffer
					enc := json.NewEncoder(&buf)
					enc.Encode(ev)

					log.Debug().Msgf("Sending chunk: %s", buf.String())
					fmt.Fprintf(w, "data: %v\n", buf.String())
					w.Flush()
				}
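
				// Close the stream with a final chunk carrying the finish
				// reason, followed by the OpenAI-style [DONE] sentinel.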
				resp := &schema.OpenAIResponse{
					ID:      id,
					Created: created,
					Model:   input.Model, // we have to return what the user sent here, due to OpenAI spec.
					Choices: []schema.Choice{
						{
							Index:        0,
							FinishReason: "stop",
						},
					},
					Object: "text_completion",
				}
				respData, _ := json.Marshal(resp)

				w.WriteString(fmt.Sprintf("data: %s\n\n", respData))
				w.WriteString("data: [DONE]\n\n")
				w.Flush()
			}))
			return nil
		}

		var result []schema.Choice

		totalTokenUsage := backend.TokenUsage{}
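
		// Non-streaming path: evaluate every prompt in the request,
		// accumulating choices and token usage across all of them.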
		for k, i := range config.PromptStrings {
			if templateFile != "" {
				// A model can have a "file.bin.tmpl" file associated with a prompt template prefix
				templatedInput, err := ml.EvaluateTemplateForPrompt(model.CompletionPromptTemplate, templateFile, model.PromptTemplateData{
					SystemPrompt: config.SystemPrompt,
					Input:        i,
				})
				if err == nil {
					i = templatedInput
					log.Debug().Msgf("Template found, input modified to: %s", i)
				}
			}

			r, tokenUsage, err := ComputeChoices(
				input, i, config, appConfig, ml, func(s string, c *[]schema.Choice) {
					*c = append(*c, schema.Choice{Text: s, FinishReason: "stop", Index: k})
				}, nil)
			if err != nil {
				return err
			}

			totalTokenUsage.Prompt += tokenUsage.Prompt
			totalTokenUsage.Completion += tokenUsage.Completion

			result = append(result, r...)
		}

		resp := &schema.OpenAIResponse{
			ID:      id,
			Created: created,
			Model:   input.Model, // we have to return what the user sent here, due to OpenAI spec.
			Choices: result,
			Object:  "text_completion",
			Usage: schema.OpenAIUsage{
				PromptTokens:     totalTokenUsage.Prompt,
				CompletionTokens: totalTokenUsage.Completion,
				TotalTokens:      totalTokenUsage.Prompt + totalTokenUsage.Completion,
			},
		}

		jsonResult, _ := json.Marshal(resp)
		log.Debug().Msgf("Response: %s", jsonResult)

		// Return the prediction in the response body
		return c.JSON(resp)
	}
}