Mirror of https://github.com/mudler/LocalAI.git (synced 2024-12-18 20:27:57 +00:00)
Revert "feat: include tokens usage for streamed output (#4282)"
This reverts commit 0d6c3a7d57
.
This commit is contained in:
parent
87b7648591
commit
184fbc26bf
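The reverted feature had the streaming callback receive the full gRPC reply (message bytes plus prompt and completion token counts); after the revert the callback once again receives only the raw message bytes, so per-chunk token usage is no longer surfaced to the streamed response. A minimal sketch of the two callback shapes, using a local stand-in type rather than the generated pb.Reply:

package main

import "fmt"

// reply is a stand-in for the generated pb.Reply, which also carries
// PromptTokens and Tokens counters alongside the message bytes.
type reply struct {
	message      []byte
	promptTokens int32
	tokens       int32
}

// Pre-revert shape: the whole reply is visible, so token usage can be
// read for every streamed chunk.
func onReply(r *reply) {
	fmt.Printf("chunk=%q prompt=%d completion=%d\n", r.message, r.promptTokens, r.tokens)
}

// Post-revert shape: only the raw message bytes are delivered.
func onBytes(chars []byte) {
	fmt.Printf("chunk=%q\n", chars)
}

func main() {
	r := &reply{message: []byte("hello"), promptTokens: 12, tokens: 1}
	onReply(r)         // pre-revert style
	onBytes(r.message) // post-revert style
}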
@@ -117,12 +117,8 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
 		ss := ""
 
 		var partialRune []byte
-		err := inferenceModel.PredictStream(ctx, opts, func(reply *proto.Reply) {
-			msg := reply.GetMessage()
-			partialRune = append(partialRune, msg...)
-
-			tokenUsage.Prompt = int(reply.PromptTokens)
-			tokenUsage.Completion = int(reply.Tokens)
-
+		err := inferenceModel.PredictStream(ctx, opts, func(chars []byte) {
+			partialRune = append(partialRune, chars...)
+
 			for len(partialRune) > 0 {
 				r, size := utf8.DecodeRune(partialRune)
@@ -136,10 +132,6 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
 
 				partialRune = partialRune[size:]
 			}
-
-			if len(msg) == 0 {
-				tokenCallback("", tokenUsage)
-			}
 		})
 		return LLMResponse{
 			Response: ss,
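The restored callback accumulates raw bytes and only forwards complete UTF-8 runes, because a multi-byte character can be split across stream chunks. A standalone sketch of that buffering idea (the emit function stands in for the real token callback, which also threads token usage through):

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	// Simulated stream chunks that split the two-byte rune 'é' (0xC3 0xA9).
	chunks := [][]byte{[]byte("caf"), {0xC3}, {0xA9}}

	ss := ""
	var partialRune []byte
	emit := func(s string) { ss += s } // stand-in for the real token callback

	for _, chars := range chunks {
		partialRune = append(partialRune, chars...)

		for len(partialRune) > 0 {
			r, size := utf8.DecodeRune(partialRune)
			if r == utf8.RuneError && size == 1 {
				break // incomplete rune: wait for the next chunk
			}
			emit(string(r))
			partialRune = partialRune[size:]
		}
	}
	fmt.Println(ss) // café
}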
@@ -39,15 +39,11 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
 		responses <- initialMessage
 
 		ComputeChoices(req, s, config, startupOptions, loader, func(s string, c *[]schema.Choice) {}, func(s string, usage backend.TokenUsage) bool {
-			choices := []schema.Choice{}
-			if s != "" {
-				choices = append(choices, schema.Choice{Delta: &schema.Message{Content: &s}, Index: 0})
-			}
 			resp := schema.OpenAIResponse{
 				ID:      id,
 				Created: created,
 				Model:   req.Model, // we have to return what the user sent here, due to OpenAI spec.
-				Choices: choices,
+				Choices: []schema.Choice{{Delta: &schema.Message{Content: &s}, Index: 0}},
 				Object:  "chat.completion.chunk",
 				Usage: schema.OpenAIUsage{
 					PromptTokens: usage.Prompt,
@@ -469,9 +465,6 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
 	toolsCalled := false
 	for ev := range responses {
 		usage = &ev.Usage // Copy a pointer to the latest usage chunk so that the stop message can reference it
-		if len(ev.Choices) == 0 {
-			break
-		}
 		if len(ev.Choices[0].Delta.ToolCalls) > 0 {
 			toolsCalled = true
 		}
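With the revert, every streamed event again carries exactly one choice holding the current delta, and the usage-only terminal chunk (an event with an empty Choices slice) disappears, which is why the len(ev.Choices) == 0 guard above is dropped. A rough sketch of the post-revert chunk shape, with simplified local types standing in for the schema package:

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-ins for the schema package types used above.
type Message struct {
	Content *string `json:"content,omitempty"`
}

type Choice struct {
	Delta *Message `json:"delta,omitempty"`
	Index int      `json:"index"`
}

type Chunk struct {
	ID      string   `json:"id"`
	Object  string   `json:"object"`
	Model   string   `json:"model"`
	Choices []Choice `json:"choices"`
}

func main() {
	s := "Hello"
	// Post-revert: one choice per chunk, even when s is empty.
	c := Chunk{
		ID:      "chatcmpl-123", // illustrative ID, not from the commit
		Object:  "chat.completion.chunk",
		Model:   "example-model",
		Choices: []Choice{{Delta: &Message{Content: &s}, Index: 0}},
	}
	b, _ := json.Marshal(c)
	fmt.Println(string(b))
}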
@@ -37,7 +37,7 @@ type Backend interface {
 	Embeddings(ctx context.Context, in *pb.PredictOptions, opts ...grpc.CallOption) (*pb.EmbeddingResult, error)
 	Predict(ctx context.Context, in *pb.PredictOptions, opts ...grpc.CallOption) (*pb.Reply, error)
 	LoadModel(ctx context.Context, in *pb.ModelOptions, opts ...grpc.CallOption) (*pb.Result, error)
-	PredictStream(ctx context.Context, in *pb.PredictOptions, f func(reply *pb.Reply), opts ...grpc.CallOption) error
+	PredictStream(ctx context.Context, in *pb.PredictOptions, f func(s []byte), opts ...grpc.CallOption) error
 	GenerateImage(ctx context.Context, in *pb.GenerateImageRequest, opts ...grpc.CallOption) (*pb.Result, error)
 	TTS(ctx context.Context, in *pb.TTSRequest, opts ...grpc.CallOption) (*pb.Result, error)
 	SoundGeneration(ctx context.Context, in *pb.SoundGenerationRequest, opts ...grpc.CallOption) (*pb.Result, error)
@@ -136,7 +136,7 @@ func (c *Client) LoadModel(ctx context.Context, in *pb.ModelOptions, opts ...grp
 	return client.LoadModel(ctx, in, opts...)
 }
 
-func (c *Client) PredictStream(ctx context.Context, in *pb.PredictOptions, f func(reply *pb.Reply), opts ...grpc.CallOption) error {
+func (c *Client) PredictStream(ctx context.Context, in *pb.PredictOptions, f func(s []byte), opts ...grpc.CallOption) error {
 	if !c.parallel {
 		c.opMutex.Lock()
 		defer c.opMutex.Unlock()
@@ -158,7 +158,7 @@ func (c *Client) PredictStream(ctx context.Context, in *pb.PredictOptions, f fun
 	}
 
 	for {
-		reply, err := stream.Recv()
+		feature, err := stream.Recv()
 		if err == io.EOF {
 			break
 		}
@@ -167,7 +167,7 @@ func (c *Client) PredictStream(ctx context.Context, in *pb.PredictOptions, f fun
 
 			return err
 		}
-		f(reply)
+		f(feature.GetMessage())
 	}
 
 	return nil
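Taken together, the three client hunks restore a receive loop that reads replies until EOF and hands only the message bytes to the callback. A self-contained sketch of that loop, with a fake stream standing in for the real gRPC client stream and local stand-in types instead of the generated ones:

package main

import (
	"fmt"
	"io"
)

// pbReply is a stand-in for the generated pb.Reply.
type pbReply struct{ Message []byte }

func (r *pbReply) GetMessage() []byte { return r.Message }

// replyStream mimics the Recv() side of the server-streaming RPC.
type replyStream interface {
	Recv() (*pbReply, error)
}

// receiveAll mirrors the reverted client loop: read replies until EOF and
// hand only the message bytes to the callback.
func receiveAll(stream replyStream, f func(s []byte)) error {
	for {
		feature, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		f(feature.GetMessage())
	}
	return nil
}

// fakeStream replays canned replies so the sketch runs without a server.
type fakeStream struct{ replies []*pbReply }

func (s *fakeStream) Recv() (*pbReply, error) {
	if len(s.replies) == 0 {
		return nil, io.EOF
	}
	r := s.replies[0]
	s.replies = s.replies[1:]
	return r, nil
}

func main() {
	st := &fakeStream{replies: []*pbReply{{Message: []byte("Hel")}, {Message: []byte("lo")}}}
	out := ""
	_ = receiveAll(st, func(s []byte) { out += string(s) })
	fmt.Println(out) // Hello
}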
@@ -35,7 +35,7 @@ func (e *embedBackend) LoadModel(ctx context.Context, in *pb.ModelOptions, opts
 	return e.s.LoadModel(ctx, in)
 }
 
-func (e *embedBackend) PredictStream(ctx context.Context, in *pb.PredictOptions, f func(reply *pb.Reply), opts ...grpc.CallOption) error {
+func (e *embedBackend) PredictStream(ctx context.Context, in *pb.PredictOptions, f func(s []byte), opts ...grpc.CallOption) error {
 	bs := &embedBackendServerStream{
 		ctx: ctx,
 		fn:  f,
@@ -97,11 +97,11 @@ func (e *embedBackend) GetTokenMetrics(ctx context.Context, in *pb.MetricsReques
 
 type embedBackendServerStream struct {
 	ctx context.Context
-	fn  func(reply *pb.Reply)
+	fn  func(s []byte)
 }
 
 func (e *embedBackendServerStream) Send(reply *pb.Reply) error {
-	e.fn(reply)
+	e.fn(reply.GetMessage())
 	return nil
 }
 
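For the in-process ("embedded") backend, the server-side stream is adapted so that Send extracts just the message bytes before invoking the stored callback, keeping it on the same func(s []byte) contract as the gRPC client path. A compact sketch of that adapter pattern, again with a stand-in reply type rather than the generated pb.Reply:

package main

import "fmt"

// pbReply is a stand-in for the generated pb.Reply.
type pbReply struct{ Message []byte }

func (r *pbReply) GetMessage() []byte { return r.Message }

// byteStream plays the role of embedBackendServerStream: the backend calls
// Send with a full reply, but the stored callback only ever sees bytes.
type byteStream struct {
	fn func(s []byte)
}

func (s *byteStream) Send(reply *pbReply) error {
	s.fn(reply.GetMessage())
	return nil
}

func main() {
	out := ""
	bs := &byteStream{fn: func(b []byte) { out += string(b) }}
	_ = bs.Send(&pbReply{Message: []byte("hi")})
	fmt.Println(out) // hi
}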