mirror of
https://github.com/mudler/LocalAI.git
synced 2024-12-21 05:33:09 +00:00
530bec9c64
* feat(initializer): do not specify backends to autoload We can simply try to autoload the backends extracted in the asset dir. This will allow to build variants of the same backend (for e.g. with different instructions sets), so to have a single binary for all the variants. Signed-off-by: mudler <mudler@localai.io> * refactor(prepare): refactor out llama.cpp prepare steps Make it so are idempotent and that we can re-build Signed-off-by: mudler <mudler@localai.io> * [TEST] feat(build): build noavx version along Signed-off-by: mudler <mudler@localai.io> * build: make build parallel Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * build: do not override CMAKE_ARGS Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * build: add fallback variant Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Fixups Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * fix(huggingface-langchain): fail if no token is set Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * fix(huggingface-langchain): rename Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * fix: do not autoload local-store Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * fix: give priority between the listed backends Signed-off-by: Ettore Di Giacinto <mudler@localai.io> --------- Signed-off-by: mudler <mudler@localai.io> Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
54 lines
1.1 KiB
Go
54 lines
1.1 KiB
Go
package langchain
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
|
|
"github.com/tmc/langchaingo/llms"
|
|
"github.com/tmc/langchaingo/llms/huggingface"
|
|
)
|
|
|
|
// HuggingFace holds the configuration needed to call the HuggingFace
// Inference API through langchaingo: the model repository to target and
// the API token used to authenticate.
type HuggingFace struct {
	// modelPath is the HuggingFace repository id (set from repoId in NewHuggingFace).
	modelPath string
	// token is the HuggingFace API token; NewHuggingFace rejects an empty value.
	token string
}
|
|
|
|
func NewHuggingFace(repoId, token string) (*HuggingFace, error) {
|
|
if token == "" {
|
|
return nil, fmt.Errorf("no huggingface token provided")
|
|
}
|
|
return &HuggingFace{
|
|
modelPath: repoId,
|
|
token: token,
|
|
}, nil
|
|
}
|
|
|
|
func (s *HuggingFace) PredictHuggingFace(text string, opts ...PredictOption) (*Predict, error) {
|
|
po := NewPredictOptions(opts...)
|
|
|
|
// Init client
|
|
llm, err := huggingface.New(huggingface.WithToken(s.token))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
// Convert from LocalAI to LangChainGo format of options
|
|
co := []llms.CallOption{
|
|
llms.WithModel(po.Model),
|
|
llms.WithMaxTokens(po.MaxTokens),
|
|
llms.WithTemperature(po.Temperature),
|
|
llms.WithStopWords(po.StopWords),
|
|
}
|
|
|
|
// Call Inference API
|
|
ctx := context.Background()
|
|
completion, err := llm.Call(ctx, text, co...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
return &Predict{
|
|
Completion: completion,
|
|
}, nil
|
|
}
|