f895d06605
* fix(defaults): set better defaults for inferencing. This changeset aims to set better defaults and to properly detect when no inference settings are provided with the model. If none are specified, we default to mirostat sampling and offload all GPU layers (if a GPU is detected). Related to https://github.com/mudler/LocalAI/issues/1373 and https://github.com/mudler/LocalAI/issues/1723
* Adapt tests
* Also pre-initialize the default seed
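For illustration only, a minimal, self-contained sketch of the kind of defaulting the commit describes (mirostat sampling, full GPU offload when a GPU is detected, a pre-initialized seed). The type names, field names, and default values below are assumptions made for this sketch, not the actual LocalAI configuration structures.

package main

import "fmt"

// samplingConfig is a hypothetical stand-in for a model's inference settings;
// nil pointers mean "not specified by the user or the model config".
type samplingConfig struct {
	Mirostat  *int
	GPULayers *int
	Seed      *int
}

// applyInferenceDefaults fills unset fields with the defaults described in the
// commit message: mirostat sampling, all layers offloaded when a GPU is
// detected, and a pre-initialized seed.
func applyInferenceDefaults(cfg *samplingConfig, gpuDetected bool) {
	if cfg.Mirostat == nil {
		m := 2 // assumed: mirostat v2 sampling
		cfg.Mirostat = &m
	}
	if cfg.GPULayers == nil && gpuDetected {
		layers := 99999 // assumed convention: a large value offloads every layer
		cfg.GPULayers = &layers
	}
	if cfg.Seed == nil {
		seed := -1 // assumed sentinel for "pick a random seed at load time"
		cfg.Seed = &seed
	}
}

func main() {
	cfg := samplingConfig{}
	applyInferenceDefaults(&cfg, true)
	fmt.Println("mirostat:", *cfg.Mirostat, "gpu_layers:", *cfg.GPULayers, "seed:", *cfg.Seed)
}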
39 lines · 1.0 KiB · Go
package backend

import (
	"context"
	"fmt"

	"github.com/go-skynet/LocalAI/core/config"
	"github.com/go-skynet/LocalAI/core/schema"

	"github.com/go-skynet/LocalAI/pkg/grpc/proto"
	model "github.com/go-skynet/LocalAI/pkg/model"
)

// ModelTranscription loads the whisper backend for the configured model and
// transcribes the given audio file in the requested language.
func ModelTranscription(audio, language string, ml *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (*schema.Result, error) {

	opts := modelOpts(backendConfig, appConfig, []model.Option{
		model.WithBackendString(model.WhisperBackend),
		model.WithModel(backendConfig.Model),
		model.WithContext(appConfig.Context),
		model.WithThreads(uint32(*backendConfig.Threads)),
		model.WithAssetDir(appConfig.AssetsDestination),
	})

	whisperModel, err := ml.BackendLoader(opts...)
	if err != nil {
		return nil, err
	}

	if whisperModel == nil {
		return nil, fmt.Errorf("could not load whisper model")
	}

	return whisperModel.AudioTranscription(context.Background(), &proto.TranscriptRequest{
		Dst:      audio,
		Language: language,
		Threads:  uint32(*backendConfig.Threads),
	})
}
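For context, a minimal usage sketch (not part of the original file): how a caller with an already-constructed model loader and configuration might invoke ModelTranscription. The construction of those dependencies is omitted because it depends on the surrounding application wiring; only the function signature shown above is assumed, and the import path for this package is an assumption based on the file's imports.

package example

import (
	"fmt"
	"log"

	"github.com/go-skynet/LocalAI/core/backend"
	"github.com/go-skynet/LocalAI/core/config"
	model "github.com/go-skynet/LocalAI/pkg/model"
)

// transcribeExample is a hypothetical caller; the loader and configs are
// assumed to be built elsewhere in the application.
func transcribeExample(ml *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) {
	result, err := backend.ModelTranscription("/path/to/audio.wav", "en", ml, backendConfig, appConfig)
	if err != nil {
		log.Fatal(err)
	}
	// Print the raw result; its exact fields are defined in core/schema.
	fmt.Printf("%+v\n", result)
}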