diff --git a/examples/command/command.cpp b/examples/command/command.cpp
index 11ed9ed6..49f40342 100644
--- a/examples/command/command.cpp
+++ b/examples/command/command.cpp
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include

 // command-line parameters
 struct whisper_params {
diff --git a/examples/lsp/lsp.cpp b/examples/lsp/lsp.cpp
index 803cd6d5..5d65aced 100644
--- a/examples/lsp/lsp.cpp
+++ b/examples/lsp/lsp.cpp
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include

 using json = nlohmann::json;

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index beef57dd..21ae2594 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include

 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
diff --git a/examples/stream/stream.cpp b/examples/stream/stream.cpp
index 190f68a2..5f7387f7 100644
--- a/examples/stream/stream.cpp
+++ b/examples/stream/stream.cpp
@@ -12,7 +12,7 @@
 #include
 #include
 #include
-
+#include
 // command-line parameters
 struct whisper_params {

@@ -157,6 +157,7 @@ int main(int argc, char ** argv) {
     cparams.use_gpu = params.use_gpu;
     cparams.flash_attn = params.flash_attn;

+    fprintf(stderr, "whisper_init_from_file_with_params ...\n");
     struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);

     std::vector pcmf32 (n_samples_30s, 0.0f);
@@ -166,6 +167,8 @@ int main(int argc, char ** argv) {
     std::vector prompt_tokens;

     // print some info about the processing
+    fprintf(stderr, "whisper_init_from_file_with_params ok\n");
+
     {
         fprintf(stderr, "\n");
         if (!whisper_is_multilingual(ctx)) {
diff --git a/examples/talk-llama/talk-llama.cpp b/examples/talk-llama/talk-llama.cpp
index dcdaec48..51658700 100644
--- a/examples/talk-llama/talk-llama.cpp
+++ b/examples/talk-llama/talk-llama.cpp
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include

 static std::vector llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos) {
     const llama_model * model = llama_get_model(ctx);
diff --git a/examples/wchess/libwchess/WChess.cpp b/examples/wchess/libwchess/WChess.cpp
index d9f06696..5da0345a 100644
--- a/examples/wchess/libwchess/WChess.cpp
+++ b/examples/wchess/libwchess/WChess.cpp
@@ -3,6 +3,7 @@
 #include "grammar-parser.h"
 #include "common.h"
 #include
+#include

 WChess::WChess(whisper_context * ctx, const whisper_full_params & wparams,