2022-10-04 17:35:01 +00:00
|
|
|
#ifndef WHISPER_H
|
|
|
|
#define WHISPER_H
|
|
|
|
|
2023-01-08 11:03:33 +00:00
|
|
|
#include <stddef.h>
|
2022-10-04 19:43:37 +00:00
|
|
|
#include <stdint.h>
|
2022-10-10 05:11:18 +00:00
|
|
|
#include <stdbool.h>
|
2022-10-04 19:43:37 +00:00
|
|
|
|
2022-10-04 17:35:01 +00:00
|
|
|
#ifdef WHISPER_SHARED
|
|
|
|
# ifdef _WIN32
|
|
|
|
# ifdef WHISPER_BUILD
|
|
|
|
# define WHISPER_API __declspec(dllexport)
|
|
|
|
# else
|
|
|
|
# define WHISPER_API __declspec(dllimport)
|
|
|
|
# endif
|
|
|
|
# else
|
|
|
|
# define WHISPER_API __attribute__ ((visibility ("default")))
|
|
|
|
# endif
|
|
|
|
#else
|
|
|
|
# define WHISPER_API
|
|
|
|
#endif
|
|
|
|
|
2022-10-04 19:43:37 +00:00
|
|
|
#define WHISPER_SAMPLE_RATE 16000
|
|
|
|
#define WHISPER_N_FFT 400
|
|
|
|
#define WHISPER_N_MEL 80
|
|
|
|
#define WHISPER_HOP_LENGTH 160
|
|
|
|
#define WHISPER_CHUNK_SIZE 30
|
|
|
|
|
2022-10-04 17:35:01 +00:00
|
|
|
#ifdef __cplusplus
|
|
|
|
extern "C" {
|
|
|
|
#endif
|
|
|
|
|
|
|
|
//
|
|
|
|
// C interface
|
|
|
|
//
|
2022-10-18 15:27:57 +00:00
|
|
|
// The following interface is thread-safe as long as the same whisper_context is not used by multiple threads
|
|
|
|
// concurrently.
|
2022-10-08 15:09:56 +00:00
|
|
|
//
|
|
|
|
// Basic usage:
|
|
|
|
//
|
|
|
|
// #include "whisper.h"
|
|
|
|
//
|
|
|
|
// ...
|
|
|
|
//
|
2023-01-08 11:03:33 +00:00
|
|
|
// struct whisper_context * ctx = whisper_init_from_file("/path/to/ggml-base.en.bin");
|
2022-10-08 15:09:56 +00:00
|
|
|
//
|
|
|
|
// if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
|
|
|
|
// fprintf(stderr, "failed to process audio\n");
|
|
|
|
// return 7;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// const int n_segments = whisper_full_n_segments(ctx);
|
|
|
|
// for (int i = 0; i < n_segments; ++i) {
|
|
|
|
// const char * text = whisper_full_get_segment_text(ctx, i);
|
|
|
|
// printf("%s", text);
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// whisper_free(ctx);
|
|
|
|
//
|
|
|
|
// ...
|
|
|
|
//
|
|
|
|
// This is a demonstration of the most straightforward usage of the library.
|
|
|
|
// "pcmf32" contains the RAW audio data in 32-bit floating point format.
|
|
|
|
//
|
|
|
|
// The interface also allows for more fine-grained control over the computation, but it requires a deeper
|
|
|
|
// understanding of how the model works.
|
|
|
|
//
|
2022-10-04 17:35:01 +00:00
|
|
|
|
|
|
|
struct whisper_context;
|
|
|
|
|
|
|
|
typedef int whisper_token;
|
|
|
|
|
2022-11-02 19:18:20 +00:00
|
|
|
typedef struct whisper_token_data {
|
2022-10-29 06:42:14 +00:00
|
|
|
whisper_token id; // token id
|
|
|
|
whisper_token tid; // forced timestamp token id
|
|
|
|
|
2022-11-26 15:28:28 +00:00
|
|
|
float p; // probability of the token
|
2023-01-15 09:29:57 +00:00
|
|
|
float plog; // log probability of the token
|
2022-11-26 15:28:28 +00:00
|
|
|
float pt; // probability of the timestamp token
|
|
|
|
float ptsum; // sum of probabilities of all timestamp tokens
|
2022-11-02 19:18:20 +00:00
|
|
|
|
|
|
|
// token-level timestamp data
|
|
|
|
// do not use if you haven't computed token-level timestamps
|
2022-11-26 15:28:28 +00:00
|
|
|
int64_t t0; // start time of the token
|
|
|
|
int64_t t1; // end time of the token
|
2022-11-02 19:18:20 +00:00
|
|
|
|
2022-11-26 15:28:28 +00:00
|
|
|
float vlen; // voice length of the token
|
2022-11-02 19:18:20 +00:00
|
|
|
} whisper_token_data;
|
2022-10-29 06:42:14 +00:00
|
|
|
|
2023-01-08 11:03:33 +00:00
|
|
|
typedef struct whisper_model_loader {
|
|
|
|
void * context;
|
|
|
|
|
|
|
|
size_t (*read)(void * ctx, void * output, size_t read_size);
|
|
|
|
bool (*eof)(void * ctx);
|
|
|
|
void (*close)(void * ctx);
|
|
|
|
} whisper_model_loader;
|
|
|
|
|
2023-01-08 10:35:56 +00:00
|
|
|
// Various functions for loading a ggml whisper model.
|
|
|
|
// Allocate (almost) all memory needed for the model.
|
2023-01-08 11:03:33 +00:00
|
|
|
// Return NULL on failure
|
|
|
|
WHISPER_API struct whisper_context * whisper_init_from_file(const char * path_model);
|
|
|
|
WHISPER_API struct whisper_context * whisper_init_from_buffer(void * buffer, size_t buffer_size);
|
|
|
|
WHISPER_API struct whisper_context * whisper_init(struct whisper_model_loader * loader);
|
2022-10-08 15:09:56 +00:00
|
|
|
|
|
|
|
// Frees all memory allocated by the model.
|
2022-10-04 17:35:01 +00:00
|
|
|
WHISPER_API void whisper_free(struct whisper_context * ctx);
|
|
|
|
|
2022-10-08 15:09:56 +00:00
|
|
|
// Convert RAW PCM audio to log mel spectrogram.
|
|
|
|
// The resulting spectrogram is stored inside the provided whisper context.
|
|
|
|
// Returns 0 on success
|
2022-10-04 17:35:01 +00:00
|
|
|
WHISPER_API int whisper_pcm_to_mel(
|
|
|
|
struct whisper_context * ctx,
|
2022-11-26 15:28:28 +00:00
|
|
|
const float * samples,
|
|
|
|
int n_samples,
|
|
|
|
int n_threads);
|
2022-10-04 17:35:01 +00:00
|
|
|
|
2023-02-08 07:01:47 +00:00
|
|
|
// Convert RAW PCM audio to log mel spectrogram but applies a Phase Vocoder to speed up the audio x2.
|
|
|
|
// The resulting spectrogram is stored inside the provided whisper context.
|
|
|
|
// Returns 0 on success
|
|
|
|
WHISPER_API int whisper_pcm_to_mel_phase_vocoder(
|
|
|
|
struct whisper_context* ctx,
|
|
|
|
const float* samples,
|
|
|
|
int n_samples,
|
|
|
|
int n_threads);
|
|
|
|
|
|
|
|
|
2022-10-08 15:09:56 +00:00
|
|
|
// This can be used to set a custom log mel spectrogram inside the provided whisper context.
|
|
|
|
// Use this instead of whisper_pcm_to_mel() if you want to provide your own log mel spectrogram.
|
2022-10-04 17:35:01 +00:00
|
|
|
// n_mel must be 80
|
2022-10-08 15:09:56 +00:00
|
|
|
// Returns 0 on success
|
2022-10-04 17:35:01 +00:00
|
|
|
WHISPER_API int whisper_set_mel(
|
|
|
|
struct whisper_context * ctx,
|
2022-11-26 15:28:28 +00:00
|
|
|
const float * data,
|
|
|
|
int n_len,
|
|
|
|
int n_mel);
|
2022-10-04 17:35:01 +00:00
|
|
|
|
2022-10-08 15:09:56 +00:00
|
|
|
// Run the Whisper encoder on the log mel spectrogram stored inside the provided whisper context.
|
|
|
|
// Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first.
|
|
|
|
// offset can be used to specify the offset of the first frame in the spectrogram.
|
|
|
|
// Returns 0 on success
|
2022-10-04 17:35:01 +00:00
|
|
|
WHISPER_API int whisper_encode(
|
|
|
|
struct whisper_context * ctx,
|
2022-11-26 15:28:28 +00:00
|
|
|
int offset,
|
|
|
|
int n_threads);
|
2022-10-04 17:35:01 +00:00
|
|
|
|
2022-10-08 15:09:56 +00:00
|
|
|
// Run the Whisper decoder to obtain the logits and probabilities for the next token.
|
|
|
|
// Make sure to call whisper_encode() first.
|
|
|
|
// tokens + n_tokens is the provided context for the decoder.
|
|
|
|
// n_past is the number of tokens to use from previous decoder calls.
|
|
|
|
// Returns 0 on success
|
2023-01-15 09:29:57 +00:00
|
|
|
// TODO: add support for multiple decoders
|
2022-10-04 17:35:01 +00:00
|
|
|
WHISPER_API int whisper_decode(
|
|
|
|
struct whisper_context * ctx,
|
2022-11-26 15:28:28 +00:00
|
|
|
const whisper_token * tokens,
|
|
|
|
int n_tokens,
|
|
|
|
int n_past,
|
|
|
|
int n_threads);
|
2022-10-04 17:35:01 +00:00
|
|
|
|
2022-12-13 17:21:07 +00:00
|
|
|
// Convert the provided text into tokens.
|
|
|
|
// The tokens pointer must be large enough to hold the resulting tokens.
|
|
|
|
// Returns the number of tokens on success, no more than n_max_tokens
|
|
|
|
// Returns -1 on failure
|
|
|
|
// TODO: not sure if correct
|
|
|
|
WHISPER_API int whisper_tokenize(
|
|
|
|
struct whisper_context * ctx,
|
|
|
|
const char * text,
|
|
|
|
whisper_token * tokens,
|
2023-01-05 19:07:50 +00:00
|
|
|
int n_max_tokens);
|
2022-12-13 17:21:07 +00:00
|
|
|
|
2022-12-17 15:58:08 +00:00
|
|
|
// Largest language id (i.e. number of available languages - 1)
|
|
|
|
WHISPER_API int whisper_lang_max_id();
|
|
|
|
|
2022-10-08 15:09:56 +00:00
|
|
|
// Return the id of the specified language, returns -1 if not found
|
2022-12-17 15:58:08 +00:00
|
|
|
// Examples:
|
|
|
|
// "de" -> 2
|
|
|
|
// "german" -> 2
|
2022-10-04 17:35:01 +00:00
|
|
|
WHISPER_API int whisper_lang_id(const char * lang);
|
|
|
|
|
2022-12-17 15:58:08 +00:00
|
|
|
// Return the short string of the specified language id (e.g. 2 -> "de"), returns nullptr if not found
|
|
|
|
WHISPER_API const char * whisper_lang_str(int id);
|
|
|
|
|
|
|
|
// Use mel data at offset_ms to try and auto-detect the spoken language
|
|
|
|
// Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first
|
|
|
|
// Returns the top language id or negative on failure
|
|
|
|
// If not null, fills the lang_probs array with the probabilities of all languages
|
|
|
|
    // The array must be whisper_lang_max_id() + 1 in size
|
|
|
|
// ref: https://github.com/openai/whisper/blob/main/whisper/decoding.py#L18-L69
|
|
|
|
WHISPER_API int whisper_lang_auto_detect(
|
|
|
|
struct whisper_context * ctx,
|
|
|
|
int offset_ms,
|
|
|
|
int n_threads,
|
|
|
|
float * lang_probs);
|
|
|
|
|
2022-10-08 07:56:59 +00:00
|
|
|
WHISPER_API int whisper_n_len (struct whisper_context * ctx); // mel length
|
|
|
|
WHISPER_API int whisper_n_vocab (struct whisper_context * ctx);
|
|
|
|
WHISPER_API int whisper_n_text_ctx (struct whisper_context * ctx);
|
2022-12-31 07:55:33 +00:00
|
|
|
WHISPER_API int whisper_n_audio_ctx (struct whisper_context * ctx);
|
2022-10-08 07:56:59 +00:00
|
|
|
WHISPER_API int whisper_is_multilingual(struct whisper_context * ctx);
|
|
|
|
|
2023-01-15 09:29:57 +00:00
|
|
|
// Token logits obtained from the last call to whisper_decode()
|
|
|
|
// The logits for the last token are stored in the last row
|
|
|
|
// Rows: n_tokens
|
|
|
|
// Cols: n_vocab
|
|
|
|
WHISPER_API float * whisper_get_logits(struct whisper_context * ctx);
|
2022-10-04 17:35:01 +00:00
|
|
|
|
2022-10-08 15:09:56 +00:00
|
|
|
// Token Id -> String. Uses the vocabulary in the provided context
|
2022-10-04 17:35:01 +00:00
|
|
|
WHISPER_API const char * whisper_token_to_str(struct whisper_context * ctx, whisper_token token);
|
|
|
|
|
2022-10-08 15:09:56 +00:00
|
|
|
// Special tokens
|
2022-10-04 17:35:01 +00:00
|
|
|
WHISPER_API whisper_token whisper_token_eot (struct whisper_context * ctx);
|
|
|
|
WHISPER_API whisper_token whisper_token_sot (struct whisper_context * ctx);
|
|
|
|
WHISPER_API whisper_token whisper_token_prev(struct whisper_context * ctx);
|
|
|
|
WHISPER_API whisper_token whisper_token_solm(struct whisper_context * ctx);
|
|
|
|
WHISPER_API whisper_token whisper_token_not (struct whisper_context * ctx);
|
|
|
|
WHISPER_API whisper_token whisper_token_beg (struct whisper_context * ctx);
|
2022-12-17 15:58:08 +00:00
|
|
|
WHISPER_API whisper_token whisper_token_lang(struct whisper_context * ctx, int lang_id);
|
2022-10-04 17:35:01 +00:00
|
|
|
|
2022-10-08 15:09:56 +00:00
|
|
|
// Task tokens
|
2022-11-26 14:27:04 +00:00
|
|
|
WHISPER_API whisper_token whisper_token_translate (void);
|
|
|
|
WHISPER_API whisper_token whisper_token_transcribe(void);
|
2022-10-04 17:35:01 +00:00
|
|
|
|
2022-10-08 15:09:56 +00:00
|
|
|
// Performance information
|
2022-10-04 17:35:01 +00:00
|
|
|
WHISPER_API void whisper_print_timings(struct whisper_context * ctx);
|
2022-11-25 21:07:42 +00:00
|
|
|
WHISPER_API void whisper_reset_timings(struct whisper_context * ctx);
|
2022-10-04 17:35:01 +00:00
|
|
|
|
2022-11-26 15:28:28 +00:00
|
|
|
// Print system information
|
|
|
|
WHISPER_API const char * whisper_print_system_info(void);
|
|
|
|
|
2022-10-04 17:35:01 +00:00
|
|
|
////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
2022-10-18 15:17:24 +00:00
|
|
|
// Available sampling strategies
|
|
|
|
enum whisper_sampling_strategy {
|
2023-01-15 09:29:57 +00:00
|
|
|
WHISPER_SAMPLING_GREEDY, // similar to OpenAI's GreefyDecoder
|
|
|
|
WHISPER_SAMPLING_BEAM_SEARCH, // similar to OpenAI's BeamSearchDecoder
|
2022-10-04 17:35:01 +00:00
|
|
|
};
|
|
|
|
|
2022-10-22 18:06:50 +00:00
|
|
|
// Text segment callback
|
|
|
|
// Called on every newly generated text segment
|
|
|
|
// Use the whisper_full_...() functions to obtain the text segments
|
2022-11-02 19:18:20 +00:00
|
|
|
typedef void (*whisper_new_segment_callback)(struct whisper_context * ctx, int n_new, void * user_data);
|
2022-10-22 18:06:50 +00:00
|
|
|
|
2022-11-27 18:28:36 +00:00
|
|
|
// Encoder begin callback
|
|
|
|
// If not NULL, called before the encoder starts
|
|
|
|
// If it returns false, the computation is aborted
|
|
|
|
typedef bool (*whisper_encoder_begin_callback)(struct whisper_context * ctx, void * user_data);
|
|
|
|
|
|
|
|
// Parameters for the whisper_full() function
|
|
|
|
    // If you change the order or add new parameters, make sure to update the default values in whisper.cpp:
|
|
|
|
// whisper_full_default_params()
|
2022-10-04 17:35:01 +00:00
|
|
|
struct whisper_full_params {
|
2022-10-18 15:17:24 +00:00
|
|
|
enum whisper_sampling_strategy strategy;
|
2022-10-04 17:35:01 +00:00
|
|
|
|
|
|
|
int n_threads;
|
2023-01-15 09:29:57 +00:00
|
|
|
int n_max_text_ctx; // max tokens to use from past text as prompt for the decoder
|
2022-11-26 15:28:28 +00:00
|
|
|
int offset_ms; // start offset in ms
|
|
|
|
int duration_ms; // audio duration to process in ms
|
2022-10-04 17:35:01 +00:00
|
|
|
|
2022-10-04 19:43:37 +00:00
|
|
|
bool translate;
|
2023-01-16 17:37:06 +00:00
|
|
|
bool no_context; // do not use past transcription (if any) as initial prompt for the decoder
|
2022-11-26 15:28:28 +00:00
|
|
|
bool single_segment; // force single segment output (useful for streaming)
|
2023-01-15 09:29:57 +00:00
|
|
|
bool print_special; // print special tokens (e.g. <SOT>, <EOT>, <BEG>, etc.)
|
|
|
|
bool print_progress; // print progress information
|
|
|
|
bool print_realtime; // print results from within whisper.cpp (avoid it, use callback instead)
|
|
|
|
bool print_timestamps; // print timestamps for each text segment when printing realtime
|
2022-10-04 17:35:01 +00:00
|
|
|
|
2022-11-02 19:18:20 +00:00
|
|
|
// [EXPERIMENTAL] token-level timestamps
|
|
|
|
bool token_timestamps; // enable token-level timestamps
|
|
|
|
float thold_pt; // timestamp token probability threshold (~0.01)
|
|
|
|
float thold_ptsum; // timestamp token sum probability threshold (~0.01)
|
|
|
|
int max_len; // max segment length in characters
|
2023-02-05 12:44:23 +00:00
|
|
|
bool split_on_word; // split on word rather than on token (when used with max_len)
|
2022-11-20 18:52:24 +00:00
|
|
|
int max_tokens; // max tokens per segment (0 = no limit)
|
2022-11-02 19:18:20 +00:00
|
|
|
|
2022-11-12 16:03:49 +00:00
|
|
|
// [EXPERIMENTAL] speed-up techniques
|
2023-01-15 09:29:57 +00:00
|
|
|
// note: these can significantly reduce the quality of the output
|
2022-11-26 15:28:28 +00:00
|
|
|
bool speed_up; // speed-up the audio by 2x using Phase Vocoder
|
|
|
|
int audio_ctx; // overwrite the audio context size (0 = use default)
|
2022-11-12 16:03:49 +00:00
|
|
|
|
2023-01-15 09:29:57 +00:00
|
|
|
// tokens to provide to the whisper decoder as initial prompt
|
2022-11-22 16:20:05 +00:00
|
|
|
// these are prepended to any existing text context from a previous call
|
2022-11-22 16:10:35 +00:00
|
|
|
const whisper_token * prompt_tokens;
|
|
|
|
int prompt_n_tokens;
|
|
|
|
|
2022-12-17 15:58:08 +00:00
|
|
|
// for auto-detection, set to nullptr, "" or "auto"
|
2022-10-04 17:35:01 +00:00
|
|
|
const char * language;
|
|
|
|
|
2023-01-15 09:29:57 +00:00
|
|
|
// common decoding parameters:
|
|
|
|
bool suppress_blank; // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/decoding.py#L89
|
2023-02-08 07:05:34 +00:00
|
|
|
bool suppress_non_speech_tokens; // ref: https://github.com/openai/whisper/blob/7858aa9c08d98f75575035ecd6481f462d66ca27/whisper/tokenizer.py#L224-L253
|
2023-01-15 09:29:57 +00:00
|
|
|
|
|
|
|
float temperature; // initial decoding temperature, ref: https://ai.stackexchange.com/a/32478
|
|
|
|
float max_initial_ts; // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/decoding.py#L97
|
|
|
|
float length_penalty; // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/transcribe.py#L267
|
|
|
|
|
|
|
|
// fallback parameters
|
|
|
|
// ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/transcribe.py#L274-L278
|
|
|
|
float temperature_inc;
|
|
|
|
float entropy_thold; // similar to OpenAI's "compression_ratio_threshold"
|
|
|
|
float logprob_thold;
|
|
|
|
float no_speech_thold; // TODO: not implemented
|
|
|
|
|
2022-10-18 15:17:24 +00:00
|
|
|
struct {
|
2023-01-15 09:29:57 +00:00
|
|
|
int best_of; // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/transcribe.py#L264
|
2022-10-18 15:17:24 +00:00
|
|
|
} greedy;
|
|
|
|
|
|
|
|
struct {
|
2023-01-15 09:29:57 +00:00
|
|
|
int beam_size; // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/transcribe.py#L265
|
|
|
|
|
|
|
|
float patience; // TODO: not implemented, ref: https://arxiv.org/pdf/2204.05424.pdf
|
2022-10-18 15:17:24 +00:00
|
|
|
} beam_search;
|
2022-10-22 18:06:50 +00:00
|
|
|
|
2023-01-15 09:29:57 +00:00
|
|
|
// called for every newly generated text segment
|
2022-10-22 18:06:50 +00:00
|
|
|
whisper_new_segment_callback new_segment_callback;
|
|
|
|
void * new_segment_callback_user_data;
|
2022-11-27 18:28:36 +00:00
|
|
|
|
2023-01-15 09:29:57 +00:00
|
|
|
// called each time before the encoder starts
|
2022-11-27 18:28:36 +00:00
|
|
|
whisper_encoder_begin_callback encoder_begin_callback;
|
|
|
|
void * encoder_begin_callback_user_data;
|
2022-10-04 17:35:01 +00:00
|
|
|
};
|
|
|
|
|
2022-10-18 15:17:24 +00:00
|
|
|
WHISPER_API struct whisper_full_params whisper_full_default_params(enum whisper_sampling_strategy strategy);
|
2022-10-04 19:43:37 +00:00
|
|
|
|
2022-10-08 15:09:56 +00:00
|
|
|
// Run the entire model: PCM -> log mel spectrogram -> encoder -> decoder -> text
|
|
|
|
// Uses the specified decoding strategy to obtain the text.
|
2022-10-04 17:35:01 +00:00
|
|
|
WHISPER_API int whisper_full(
|
2022-11-26 15:28:28 +00:00
|
|
|
struct whisper_context * ctx,
|
|
|
|
struct whisper_full_params params,
|
|
|
|
const float * samples,
|
|
|
|
int n_samples);
|
2022-10-04 17:35:01 +00:00
|
|
|
|
2022-10-29 11:08:23 +00:00
|
|
|
// Split the input audio in chunks and process each chunk separately using whisper_full()
|
|
|
|
// It seems this approach can offer some speedup in some cases.
|
|
|
|
// However, the transcription accuracy can be worse at the beginning and end of each chunk.
|
2022-10-29 09:24:02 +00:00
|
|
|
WHISPER_API int whisper_full_parallel(
|
2022-11-26 15:28:28 +00:00
|
|
|
struct whisper_context * ctx,
|
|
|
|
struct whisper_full_params params,
|
|
|
|
const float * samples,
|
|
|
|
int n_samples,
|
|
|
|
int n_processors);
|
2022-10-29 09:24:02 +00:00
|
|
|
|
2022-10-08 15:09:56 +00:00
|
|
|
// Number of generated text segments.
|
|
|
|
// A segment can be a few words, a sentence, or even a paragraph.
|
2022-10-04 19:43:37 +00:00
|
|
|
WHISPER_API int whisper_full_n_segments(struct whisper_context * ctx);
|
|
|
|
|
2023-02-05 12:46:26 +00:00
|
|
|
// Language id associated with the current context
|
|
|
|
WHISPER_API int whisper_full_lang_id(struct whisper_context * ctx);
|
|
|
|
|
2022-10-08 15:09:56 +00:00
|
|
|
// Get the start and end time of the specified segment.
|
2022-10-04 19:43:37 +00:00
|
|
|
WHISPER_API int64_t whisper_full_get_segment_t0(struct whisper_context * ctx, int i_segment);
|
|
|
|
WHISPER_API int64_t whisper_full_get_segment_t1(struct whisper_context * ctx, int i_segment);
|
|
|
|
|
2022-10-08 15:09:56 +00:00
|
|
|
// Get the text of the specified segment.
|
2022-10-04 19:43:37 +00:00
|
|
|
WHISPER_API const char * whisper_full_get_segment_text(struct whisper_context * ctx, int i_segment);
|
|
|
|
|
2022-10-21 14:33:59 +00:00
|
|
|
// Get number of tokens in the specified segment.
|
|
|
|
WHISPER_API int whisper_full_n_tokens(struct whisper_context * ctx, int i_segment);
|
|
|
|
|
|
|
|
// Get the token text of the specified token in the specified segment.
|
|
|
|
WHISPER_API const char * whisper_full_get_token_text(struct whisper_context * ctx, int i_segment, int i_token);
|
2022-10-22 18:06:50 +00:00
|
|
|
WHISPER_API whisper_token whisper_full_get_token_id (struct whisper_context * ctx, int i_segment, int i_token);
|
2022-10-21 14:33:59 +00:00
|
|
|
|
2022-10-30 08:05:58 +00:00
|
|
|
// Get token data for the specified token in the specified segment.
|
|
|
|
// This contains probabilities, timestamps, etc.
|
2022-11-02 19:18:20 +00:00
|
|
|
WHISPER_API whisper_token_data whisper_full_get_token_data(struct whisper_context * ctx, int i_segment, int i_token);
|
2022-10-30 08:05:58 +00:00
|
|
|
|
2022-10-21 14:33:59 +00:00
|
|
|
// Get the probability of the specified token in the specified segment.
|
|
|
|
WHISPER_API float whisper_full_get_token_p(struct whisper_context * ctx, int i_segment, int i_token);
|
|
|
|
|
2023-01-18 19:00:41 +00:00
|
|
|
////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
// Temporary helpers needed for exposing ggml interface
|
|
|
|
|
|
|
|
WHISPER_API int whisper_bench_memcpy(int n_threads);
|
|
|
|
WHISPER_API int whisper_bench_ggml_mul_mat(int n_threads);
|
|
|
|
|
2022-10-04 17:35:01 +00:00
|
|
|
#ifdef __cplusplus
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#endif
|