From 29f78392c182f65c4258885f86d15711cc3fd7ba Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 8 Jan 2024 16:41:28 +0200
Subject: [PATCH] main : add cli option to disable system prints (#1740)

---
 examples/main/main.cpp | 39 ++++++++++++++++++++++++---------------
 whisper.cpp            |  2 +-
 2 files changed, 25 insertions(+), 16 deletions(-)

diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 9699802e..234e2375 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -85,6 +85,7 @@ struct whisper_params {
     bool output_jsn = false;
     bool output_jsn_full = false;
     bool output_lrc = false;
+    bool no_prints = false;
     bool print_special = false;
     bool print_colors = false;
     bool print_progress = false;
@@ -155,6 +156,7 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
        else if (arg == "-oj" || arg == "--output-json") { params.output_jsn = true; }
        else if (arg == "-ojf" || arg == "--output-json-full"){ params.output_jsn_full = params.output_jsn = true; }
        else if (arg == "-of" || arg == "--output-file") { params.fname_out.emplace_back(argv[++i]); }
+       else if (arg == "-np" || arg == "--no-prints") { params.no_prints = true; }
        else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
        else if (arg == "-pc" || arg == "--print-colors") { params.print_colors = true; }
        else if (arg == "-pp" || arg == "--print-progress") { params.print_progress = true; }
@@ -212,6 +214,7 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
     fprintf(stderr, "  -oj, --output-json [%-7s] output result in a JSON file\n", params.output_jsn ? "true" : "false");
     fprintf(stderr, "  -ojf, --output-json-full [%-7s] include more information in the JSON file\n", params.output_jsn_full ? "true" : "false");
     fprintf(stderr, "  -of FNAME, --output-file FNAME [%-7s] output file path (without file extension)\n", "");
+    fprintf(stderr, "  -np, --no-prints [%-7s] do not print anything other than the results\n", params.no_prints ? "true" : "false");
     fprintf(stderr, "  -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");
     fprintf(stderr, "  -pc, --print-colors [%-7s] print colors\n", params.print_colors ? "true" : "false");
     fprintf(stderr, "  -pp, --print-progress [%-7s] print progress\n", params.print_progress ? "true" : "false");
@@ -852,6 +855,9 @@ bool output_lrc(struct whisper_context * ctx, const char * fname, const whisper_
     return true;
 }
 
+
+void cb_log_disable(enum ggml_log_level , const char * , void * ) { }
+
 int main(int argc, char ** argv) {
     whisper_params params;
 
@@ -878,6 +884,10 @@ int main(int argc, char ** argv) {
         exit(0);
     }
 
+    if (params.no_prints) {
+        whisper_log_set(cb_log_disable, NULL);
+    }
+
     // whisper init
 
     struct whisper_context_params cparams;
@@ -905,26 +915,25 @@ int main(int argc, char ** argv) {
             continue;
         }
 
-        // print system information
-        {
+        if (!whisper_is_multilingual(ctx)) {
+            if (params.language != "en" || params.translate) {
+                params.language = "en";
+                params.translate = false;
+                fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
+            }
+        }
+        if (params.detect_language) {
+            params.language = "auto";
+        }
+
+        if (!params.no_prints) {
+            // print system information
             fprintf(stderr, "\n");
             fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                     params.n_threads*params.n_processors, std::thread::hardware_concurrency(), whisper_print_system_info());
-        }
 
-        // print some info about the processing
-        {
+            // print some info about the processing
             fprintf(stderr, "\n");
-            if (!whisper_is_multilingual(ctx)) {
-                if (params.language != "en" || params.translate) {
-                    params.language = "en";
-                    params.translate = false;
-                    fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
-                }
-            }
-            if (params.detect_language) {
-                params.language = "auto";
-            }
             fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, %d processors, %d beams + best of %d, lang = %s, task = %s, %stimestamps = %d ...\n",
                     __func__, fname_inp.c_str(), int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE,
                     params.n_threads, params.n_processors, params.beam_size, params.best_of,
diff --git a/whisper.cpp b/whisper.cpp
index f6ba822a..93f3063c 100644
--- a/whisper.cpp
+++ b/whisper.cpp
@@ -1070,7 +1070,7 @@ static ggml_backend_t whisper_backend_init(const whisper_context_params & params
 #ifdef GGML_USE_METAL
     if (params.use_gpu) {
         WHISPER_LOG_INFO("%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(whisper_log_callback_default, nullptr);
+        ggml_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
         backend_gpu = ggml_backend_metal_init();
         if (!backend_gpu) {
             WHISPER_LOG_ERROR("%s: ggml_backend_metal_init() failed\n", __func__);