From 9d5771ae43d7fc7cca9d31dd924b13a29144e476 Mon Sep 17 00:00:00 2001
From: petterreinholdtsen
Date: Tue, 14 May 2024 20:32:41 +0200
Subject: [PATCH] talk-llama : reject runs without required arguments (#2153)

* Extended talk-llama example to reject runs without required arguments.

  Print warning and exit if models are not specified on the command line.

* Update examples/talk-llama/talk-llama.cpp

* Update examples/talk-llama/talk-llama.cpp

---------

Co-authored-by: Georgi Gerganov
---
 examples/talk-llama/talk-llama.cpp | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/examples/talk-llama/talk-llama.cpp b/examples/talk-llama/talk-llama.cpp
index bb8c26d5..838d6f56 100644
--- a/examples/talk-llama/talk-llama.cpp
+++ b/examples/talk-llama/talk-llama.cpp
@@ -288,6 +288,10 @@ int main(int argc, char ** argv) {
     cparams.use_gpu = params.use_gpu;
 
     struct whisper_context * ctx_wsp = whisper_init_from_file_with_params(params.model_wsp.c_str(), cparams);
+    if (!ctx_wsp) {
+        fprintf(stderr, "No whisper.cpp model specified. Please provide using -mw \n");
+        return 1;
+    }
 
     // llama init
 
@@ -301,6 +305,10 @@ int main(int argc, char ** argv) {
     }
 
     struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);
+    if (!model_llama) {
+        fprintf(stderr, "No llama.cpp model specified. Please provide using -ml \n");
+        return 1;
+    }
 
     llama_context_params lcparams = llama_context_default_params();
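
For reference, the guard added by this patch follows a common C/C++ pattern: if the loader returns a null pointer, print a diagnostic to stderr and exit with a non-zero status rather than dereferencing it later. Below is a minimal standalone sketch of that pattern; the load_model() helper and the model struct are hypothetical stand-ins for whisper_init_from_file_with_params() / llama_load_model_from_file(), used here only for illustration.

    #include <cstdio>

    // Hypothetical stand-in for the real loaders: returns nullptr when
    // no path is given, mimicking a failed model load.
    struct model { const char * path; };

    static model * load_model(const char * path) {
        static model m;
        if (path == nullptr || path[0] == '\0') {
            return nullptr;
        }
        m.path = path;
        return &m;
    }

    int main(int argc, char ** argv) {
        const char * path = argc > 1 ? argv[1] : "";

        model * m = load_model(path);
        if (!m) {
            // Same pattern as the patch: report the problem and exit with
            // a non-zero status instead of failing later on a null pointer.
            fprintf(stderr, "No model specified. Please provide a model path\n");
            return 1;
        }

        fprintf(stderr, "loaded model from '%s'\n", m->path);
        return 0;
    }

Run without an argument, the sketch prints the warning and returns 1, which mirrors how talk-llama now behaves when -mw or -ml is omitted.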