mirror of
https://github.com/mudler/LocalAI.git
synced 2025-01-18 10:46:46 +00:00
feat: extract output with regexes from LLMs (#3491)
* feat: extract output with regexes from LLMs This changeset adds `extract_regex` to the LLM config. It is a list of regexes that can match output and will be used to re-extract text from the LLM output. This is particularly useful for LLMs which output final results inside tags. Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Add tests, enhance output in case of configuration error Signed-off-by: Ettore Di Giacinto <mudler@localai.io> --------- Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
This commit is contained in:
parent
d51444d606
commit
cf747bcdec
13
core/backend/backend_suite_test.go
Normal file
13
core/backend/backend_suite_test.go
Normal file
@ -0,0 +1,13 @@
|
||||
package backend_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func TestBackend(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "Backend test suite")
|
||||
}
|
@ -9,6 +9,8 @@ import (
|
||||
"sync"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
|
||||
@ -181,13 +183,37 @@ func Finetune(config config.BackendConfig, input, prediction string) string {
|
||||
mu.Lock()
|
||||
reg, ok := cutstrings[c]
|
||||
if !ok {
|
||||
cutstrings[c] = regexp.MustCompile(c)
|
||||
r, err := regexp.Compile(c)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("failed to compile regex")
|
||||
}
|
||||
cutstrings[c] = r
|
||||
reg = cutstrings[c]
|
||||
}
|
||||
mu.Unlock()
|
||||
prediction = reg.ReplaceAllString(prediction, "")
|
||||
}
|
||||
|
||||
// extract results from the response which can be for instance inside XML tags
|
||||
var predResult string
|
||||
for _, r := range config.ExtractRegex {
|
||||
mu.Lock()
|
||||
reg, ok := cutstrings[r]
|
||||
if !ok {
|
||||
regex, err := regexp.Compile(r)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("failed to compile regex")
|
||||
}
|
||||
cutstrings[r] = regex
|
||||
reg = regex
|
||||
}
|
||||
mu.Unlock()
|
||||
predResult += reg.FindString(prediction)
|
||||
}
|
||||
if predResult != "" {
|
||||
prediction = predResult
|
||||
}
|
||||
|
||||
for _, c := range config.TrimSpace {
|
||||
prediction = strings.TrimSpace(strings.TrimPrefix(prediction, c))
|
||||
}
|
||||
|
109
core/backend/llm_test.go
Normal file
109
core/backend/llm_test.go
Normal file
@ -0,0 +1,109 @@
|
||||
package backend_test
|
||||
|
||||
import (
|
||||
. "github.com/mudler/LocalAI/core/backend"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("LLM tests", func() {
|
||||
Context("Finetune LLM output", func() {
|
||||
var (
|
||||
testConfig config.BackendConfig
|
||||
input string
|
||||
prediction string
|
||||
result string
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
testConfig = config.BackendConfig{
|
||||
PredictionOptions: schema.PredictionOptions{
|
||||
Echo: false,
|
||||
},
|
||||
LLMConfig: config.LLMConfig{
|
||||
Cutstrings: []string{`<.*?>`}, // Example regex for removing XML tags
|
||||
ExtractRegex: []string{`<result>(.*?)</result>`}, // Example regex to extract from tags
|
||||
TrimSpace: []string{" ", "\n"},
|
||||
TrimSuffix: []string{".", "!"},
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
Context("when echo is enabled", func() {
|
||||
BeforeEach(func() {
|
||||
testConfig.Echo = true
|
||||
input = "Hello"
|
||||
prediction = "World"
|
||||
})
|
||||
|
||||
It("should prepend input to prediction", func() {
|
||||
result = Finetune(testConfig, input, prediction)
|
||||
Expect(result).To(Equal("HelloWorld"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when echo is disabled", func() {
|
||||
BeforeEach(func() {
|
||||
testConfig.Echo = false
|
||||
input = "Hello"
|
||||
prediction = "World"
|
||||
})
|
||||
|
||||
It("should not modify the prediction with input", func() {
|
||||
result = Finetune(testConfig, input, prediction)
|
||||
Expect(result).To(Equal("World"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when cutstrings regex is applied", func() {
|
||||
BeforeEach(func() {
|
||||
input = ""
|
||||
prediction = "<div>Hello</div> World"
|
||||
})
|
||||
|
||||
It("should remove substrings matching cutstrings regex", func() {
|
||||
result = Finetune(testConfig, input, prediction)
|
||||
Expect(result).To(Equal("Hello World"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when extract regex is applied", func() {
|
||||
BeforeEach(func() {
|
||||
input = ""
|
||||
prediction = "<response><result>42</result></response>"
|
||||
})
|
||||
|
||||
It("should extract substrings matching the extract regex", func() {
|
||||
result = Finetune(testConfig, input, prediction)
|
||||
Expect(result).To(Equal("42"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when trimming spaces", func() {
|
||||
BeforeEach(func() {
|
||||
input = ""
|
||||
prediction = " Hello World "
|
||||
})
|
||||
|
||||
It("should trim spaces from the prediction", func() {
|
||||
result = Finetune(testConfig, input, prediction)
|
||||
Expect(result).To(Equal("Hello World"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when trimming suffixes", func() {
|
||||
BeforeEach(func() {
|
||||
input = ""
|
||||
prediction = "Hello World."
|
||||
})
|
||||
|
||||
It("should trim suffixes from the prediction", func() {
|
||||
result = Finetune(testConfig, input, prediction)
|
||||
Expect(result).To(Equal("Hello World"))
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
@ -126,6 +126,7 @@ type LLMConfig struct {
|
||||
Grammar string `yaml:"grammar"`
|
||||
StopWords []string `yaml:"stopwords"`
|
||||
Cutstrings []string `yaml:"cutstrings"`
|
||||
ExtractRegex []string `yaml:"extract_regex"`
|
||||
TrimSpace []string `yaml:"trimspace"`
|
||||
TrimSuffix []string `yaml:"trimsuffix"`
|
||||
|
||||
|
@ -68,9 +68,9 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
|
||||
|
||||
textContentToReturn = functions.ParseTextContent(result, config.FunctionsConfig)
|
||||
result = functions.CleanupLLMResult(result, config.FunctionsConfig)
|
||||
results := functions.ParseFunctionCall(result, config.FunctionsConfig)
|
||||
functionResults := functions.ParseFunctionCall(result, config.FunctionsConfig)
|
||||
log.Debug().Msgf("Text content to return: %s", textContentToReturn)
|
||||
noActionToRun := len(results) > 0 && results[0].Name == noAction || len(results) == 0
|
||||
noActionToRun := len(functionResults) > 0 && functionResults[0].Name == noAction || len(functionResults) == 0
|
||||
|
||||
switch {
|
||||
case noActionToRun:
|
||||
@ -83,7 +83,7 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
|
||||
}
|
||||
responses <- initialMessage
|
||||
|
||||
result, err := handleQuestion(config, req, ml, startupOptions, results, result, prompt)
|
||||
result, err := handleQuestion(config, req, ml, startupOptions, functionResults, result, prompt)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("error handling question")
|
||||
return
|
||||
@ -105,7 +105,7 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
|
||||
responses <- resp
|
||||
|
||||
default:
|
||||
for i, ss := range results {
|
||||
for i, ss := range functionResults {
|
||||
name, args := ss.Name, ss.Arguments
|
||||
|
||||
initialMessage := schema.OpenAIResponse{
|
||||
|
Loading…
Reference in New Issue
Block a user