Mirror of https://github.com/ggerganov/whisper.cpp.git, synced 2025-05-12 21:42:55 +00:00.
* scripts : update sync [no ci] * files : reorganize [no ci] * sync : llama.cpp * cmake : link math library * cmake : build normal ggml library * files : move headers to include * objc : fix path to ggml-metal.h * ci : fix WHISPER_CUDA -> GGML_CUDA * scripts : sync LICENSE [no ci]
32 lines
876 B
C
32 lines
876 B
C
// Wrapper of the OpenVINO Whisper Encoder model
//
// C-linkage interface so the OpenVINO-backed encoder can be called from
// both C and C++ translation units.

#pragma once

#ifdef __cplusplus
extern "C" {
#endif

// Opaque handle; the definition lives in the OpenVINO implementation file.
struct whisper_openvino_context;

// initialize openvino encoder, given path to model xml, device ("CPU", "GPU", etc.), and
// path to cache_dir. Returns null upon failure.
// Ownership: the returned context must be released with whisper_openvino_free().
struct whisper_openvino_context * whisper_openvino_init(const char * path_model,
                                                        const char * device,
                                                        const char * cache_dir);

// clean up a ctx previously returned from whisper_openvino_init()
// (safe semantics for a NULL ctx are up to the implementation — NOTE(review): confirm before relying on it)
void whisper_openvino_free(struct whisper_openvino_context * ctx);

// Forward declaration only; defined by ggml.
struct ggml_tensor;

// Perform encode using OpenVINO.
// Returns 1 on success
// Returns 0 on failure
// NOTE: the `struct` keywords are required here so this header remains valid C,
// matching the extern "C" linkage declared above.
int whisper_openvino_encode(
        struct whisper_openvino_context * ctx,
        struct ggml_tensor * mel,
        struct ggml_tensor * out);

#ifdef __cplusplus
}
#endif