diff --git a/whisper.cpp b/whisper.cpp
index ab734fe..9923fa0 100644
--- a/whisper.cpp
+++ b/whisper.cpp
@@ -14,6 +14,7 @@
 #define _USE_MATH_DEFINES
 #include <cmath>
 #include <cstdio>
+#include <cstdarg>
 #include <cstring>
 #include <fstream>
 #include <map>
@@ -92,7 +93,7 @@ static void byteswap_tensor(ggml_tensor * tensor) {
 #define WHISPER_ASSERT(x) \
     do { \
         if (!(x)) { \
-            fprintf(stderr, "WHISPER_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
+            log("WHISPER_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
             abort(); \
         } \
     } while (0)
@@ -723,6 +724,21 @@ struct whisper_context {
     std::string path_model; // populated by whisper_init_from_file()
 };
 
+static void whisper_default_log(const char * text) {
+    fprintf(stderr, "%s", text);
+}
+
+static whisper_log_callback whisper_log = whisper_default_log;
+
+static void log(const char * fmt, ...) {
+    if (!whisper_log) return;
+    char buf[1024];
+    va_list args;
+    va_start(args, fmt);
+    vsnprintf(buf, sizeof(buf), fmt, args);
+    whisper_log(buf);
+}
+
 template<typename T>
 static void read_safe(whisper_model_loader * loader, T & dest) {
     loader->read(loader->context, &dest, sizeof(T));
@@ -746,7 +762,7 @@ static bool kv_cache_init(
     cache.ctx = ggml_init(params);
 
     if (!cache.ctx) {
-        fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
+        log("%s: failed to allocate memory for kv cache\n", __func__);
         return false;
     }
 
@@ -782,7 +798,7 @@ static bool kv_cache_reinit(struct whisper_kv_cache & cache) {
     cache.ctx = ggml_init(params);
 
     if (!cache.ctx) {
-        fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
+        log("%s: failed to allocate memory for kv cache\n", __func__);
         return false;
     }
 
@@ -811,7 +827,7 @@ static void kv_cache_free(struct whisper_kv_cache & cache) {
 // see the convert-pt-to-ggml.py script for details
 //
 static bool whisper_model_load(struct whisper_model_loader * loader, whisper_context & wctx) {
-    fprintf(stderr, "%s: loading model\n", __func__);
+    log("%s: loading model\n", __func__);
 
     const int64_t t_start_us = ggml_time_us();
 
@@ -825,7 +841,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
         uint32_t magic;
         read_safe(loader, magic);
         if (magic != GGML_FILE_MAGIC) {
-            fprintf(stderr, "%s: invalid model data (bad magic)\n", __func__);
+            log("%s: invalid model data (bad magic)\n", __func__);
             return false;
         }
     }
@@ -876,25 +892,25 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
         // in order to save memory and also to speed up the computation
         wctx.wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype));
         if (wctx.wtype == GGML_TYPE_COUNT) {
-            fprintf(stderr, "%s: invalid model (bad ftype value %d)\n", __func__, model.hparams.ftype);
+            log("%s: invalid model (bad ftype value %d)\n", __func__, model.hparams.ftype);
             return false;
         }
 
         const size_t scale = model.hparams.ftype ? 1 : 2;
 
-        fprintf(stderr, "%s: n_vocab       = %d\n", __func__, hparams.n_vocab);
-        fprintf(stderr, "%s: n_audio_ctx   = %d\n", __func__, hparams.n_audio_ctx);
-        fprintf(stderr, "%s: n_audio_state = %d\n", __func__, hparams.n_audio_state);
-        fprintf(stderr, "%s: n_audio_head  = %d\n", __func__, hparams.n_audio_head);
-        fprintf(stderr, "%s: n_audio_layer = %d\n", __func__, hparams.n_audio_layer);
-        fprintf(stderr, "%s: n_text_ctx    = %d\n", __func__, hparams.n_text_ctx);
-        fprintf(stderr, "%s: n_text_state  = %d\n", __func__, hparams.n_text_state);
-        fprintf(stderr, "%s: n_text_head   = %d\n", __func__, hparams.n_text_head);
-        fprintf(stderr, "%s: n_text_layer  = %d\n", __func__, hparams.n_text_layer);
-        fprintf(stderr, "%s: n_mels        = %d\n", __func__, hparams.n_mels);
-        fprintf(stderr, "%s: ftype         = %d\n", __func__, model.hparams.ftype);
-        fprintf(stderr, "%s: qntvr         = %d\n", __func__, qntvr);
-        fprintf(stderr, "%s: type          = %d\n", __func__, model.type);
+        log("%s: n_vocab       = %d\n", __func__, hparams.n_vocab);
+        log("%s: n_audio_ctx   = %d\n", __func__, hparams.n_audio_ctx);
+        log("%s: n_audio_state = %d\n", __func__, hparams.n_audio_state);
+        log("%s: n_audio_head  = %d\n", __func__, hparams.n_audio_head);
+        log("%s: n_audio_layer = %d\n", __func__, hparams.n_audio_layer);
+        log("%s: n_text_ctx    = %d\n", __func__, hparams.n_text_ctx);
+        log("%s: n_text_state  = %d\n", __func__, hparams.n_text_state);
+        log("%s: n_text_head   = %d\n", __func__, hparams.n_text_head);
+        log("%s: n_text_layer  = %d\n", __func__, hparams.n_text_layer);
+        log("%s: n_mels        = %d\n", __func__, hparams.n_mels);
+        log("%s: ftype         = %d\n", __func__, model.hparams.ftype);
+        log("%s: qntvr         = %d\n", __func__, qntvr);
+        log("%s: type          = %d\n", __func__, model.type);
 
         // print memory requirements
         {
@@ -912,7 +928,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
             const size_t mem_required_decoder =
                 scale*MEM_REQ_KV_SELF.at(model.type);
 
-            fprintf(stderr, "%s: mem required  = %7.2f MB (+ %7.2f MB per decoder)\n", __func__,
+            log("%s: mem required  = %7.2f MB (+ %7.2f MB per decoder)\n", __func__,
                     mem_required / 1024.0 / 1024.0, mem_required_decoder / 1024.0 / 1024.0);
         }
@@ -944,7 +960,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
         read_safe(loader, n_vocab);
 
         //if (n_vocab != model.hparams.n_vocab) {
-        //    fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n",
+        //    log("%s: invalid model file '%s' (bad vocab size %d != %d)\n",
         //            __func__, fname.c_str(), n_vocab, model.hparams.n_vocab);
         //    return false;
         //}
@@ -964,7 +980,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
                 word.assign(&tmp[0], tmp.size());
             } else {
                 // seems like we have an empty-string token in multi-language models (i = 50256)
-                //fprintf(stderr, "%s: warning: empty-string token in vocab, i = %d\n", __func__, i);
+                //log("%s: warning: empty-string token in vocab, i = %d\n", __func__, i);
                 word = "";
             }
@@ -988,7 +1004,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
         }
 
         if (n_vocab < model.hparams.n_vocab) {
-            fprintf(stderr, "%s: adding %d extra tokens\n", __func__, model.hparams.n_vocab - n_vocab);
+            log("%s: adding %d extra tokens\n", __func__, model.hparams.n_vocab - n_vocab);
             for (int i = n_vocab; i < model.hparams.n_vocab; i++) {
                 if (i > vocab.token_beg) {
                     word = "[_TT_" + std::to_string(i - vocab.token_beg) + "]";
@@ -1127,7 +1143,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
         ctx_size += (15 + 15*n_audio_layer + 24*n_text_layer)*512; // object overhead
 
-        fprintf(stderr, "%s: model ctx     = %7.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
+        log("%s: model ctx     = %7.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
     }
 
     // create the ggml context
@@ -1140,7 +1156,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
 
         model.ctx = ggml_init(params);
         if (!model.ctx) {
-            fprintf(stderr, "%s: ggml_init() failed\n", __func__);
+            log("%s: ggml_init() failed\n", __func__);
             return false;
         }
     }
@@ -1373,20 +1389,20 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
             name.assign(&tmp[0], tmp.size());
 
             if (model.tensors.find(name) == model.tensors.end()) {
-                fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data());
+                log("%s: unknown tensor '%s' in model file\n", __func__, name.data());
                 return false;
             }
 
             auto tensor = model.tensors[name.data()];
             if (ggml_nelements(tensor) != nelements) {
-                fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
-                fprintf(stderr, "%s: shape: [%d, %d, %d], expected: [%d, %d, %d]\n",
+                log("%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
+                log("%s: shape: [%d, %d, %d], expected: [%d, %d, %d]\n",
                         __func__, ne[0], ne[1], ne[2], (int) tensor->ne[0], (int) tensor->ne[1], (int) tensor->ne[2]);
                 return false;
             }
 
             if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1] || tensor->ne[2] != ne[2]) {
-                fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d, %d], expected [%d, %d, %d]\n",
+                log("%s: tensor '%s' has wrong shape in model file: got [%d, %d, %d], expected [%d, %d, %d]\n",
                         __func__, name.data(), (int) tensor->ne[0], (int) tensor->ne[1], (int) tensor->ne[2], ne[0], ne[1], ne[2]);
                 return false;
             }
@@ -1394,7 +1410,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
             const size_t bpe = ggml_type_size(ggml_type(ttype));
 
             if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
-                fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
+                log("%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
                         __func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
                 return false;
             }
@@ -1407,12 +1423,12 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
             model.n_loaded++;
         }
 
-        fprintf(stderr, "%s: model size    = %7.2f MB\n", __func__, total_size/1024.0/1024.0);
+        log("%s: model size    = %7.2f MB\n", __func__, total_size/1024.0/1024.0);
 
         if (model.n_loaded == 0) {
-            fprintf(stderr, "%s: WARN no tensors loaded from model file - assuming empty model for testing\n", __func__);
+            log("%s: WARN no tensors loaded from model file - assuming empty model for testing\n", __func__);
         } else if (model.n_loaded != (int) model.tensors.size()) {
-            fprintf(stderr, "%s: ERROR not all tensors loaded from model file - expected %zu, got %d\n", __func__, model.tensors.size(), model.n_loaded);
+            log("%s: ERROR not all tensors loaded from model file - expected %zu, got %d\n", __func__, model.tensors.size(), model.n_loaded);
             return false;
         }
     }
@@ -2616,7 +2632,7 @@ static std::vector<whisper_vocab::id> tokenize(const whisper_vocab & vocab, cons
                 --j;
             }
             if (!found) {
-                fprintf(stderr, "unknown token \n");
+                log("unknown token\n");
                 ++i;
             }
         }
@@ -2683,41 +2699,41 @@ struct whisper_state * whisper_init_state(whisper_context * ctx) {
     const size_t scale = ctx->model.hparams.ftype ? 1 : 2;
 
     if (!kv_cache_init(ctx->model.hparams, scale * MEM_REQ_KV_SELF.at(ctx->model.type), state->decoders[0].kv_self, ctx->itype, ctx->model.hparams.n_text_ctx)) {
-        fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__);
+        log("%s: kv_cache_init() failed for self-attention cache\n", __func__);
         delete state;
         return nullptr;
     }
 
     {
         const size_t memory_size = ggml_nbytes(state->decoders[0].kv_self.k) + ggml_nbytes(state->decoders[0].kv_self.v);
-        fprintf(stderr, "%s: kv self size  = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
+        log("%s: kv self size  = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
     }
 
     if (!kv_cache_init(ctx->model.hparams, scale * MEM_REQ_KV_CROSS.at(ctx->model.type), state->kv_cross, ctx->itype, ctx->model.hparams.n_audio_ctx)) {
-        fprintf(stderr, "%s: kv_cache_init() failed for cross-attention cache\n", __func__);
+        log("%s: kv_cache_init() failed for cross-attention cache\n", __func__);
         delete state;
         return nullptr;
     }
 
     {
         const size_t memory_size = ggml_nbytes(state->kv_cross.k) + ggml_nbytes(state->kv_cross.v);
-        fprintf(stderr, "%s: kv cross size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
+        log("%s: kv cross size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
     }
 
 #ifdef WHISPER_USE_COREML
     const auto path_coreml = whisper_get_coreml_path_encoder(ctx->path_model);
 
-    fprintf(stderr, "%s: loading Core ML model from '%s'\n", __func__, path_coreml.c_str());
-    fprintf(stderr, "%s: first run on a device may take a while ...\n", __func__);
+    log("%s: loading Core ML model from '%s'\n", __func__, path_coreml.c_str());
+    log("%s: first run on a device may take a while ...\n", __func__);
 
     state->ctx_coreml = whisper_coreml_init(path_coreml.c_str());
     if (!state->ctx_coreml) {
-        fprintf(stderr, "%s: failed to load Core ML model from '%s'\n", __func__, path_coreml.c_str());
+        log("%s: failed to load Core ML model from '%s'\n", __func__, path_coreml.c_str());
 #ifndef WHISPER_COREML_ALLOW_FALLBACK
         return nullptr;
 #endif
     } else {
-        fprintf(stderr, "%s: Core ML model loaded\n", __func__);
+        log("%s: Core ML model loaded\n", __func__);
     }
 #endif
@@ -2757,7 +2773,7 @@ int whisper_ctx_init_openvino_encoder(
     return 1;
 #else
     if (!model_path && ctx->path_model.empty()) {
-        fprintf(stderr, "%s: model_path is nullptr, and ctx has no model_path set.\n", __func__);
+        log("%s: model_path is nullptr, and ctx has no model_path set.\n", __func__);
         return 1;
     }
@@ -2777,15 +2793,15 @@ int whisper_ctx_init_openvino_encoder(
         path_cache = cache_dir;
     }
 
-    fprintf(stderr, "%s: loading OpenVINO model from '%s'\n", __func__, path_encoder.c_str());
-    fprintf(stderr, "%s: first run on a device may take a while ...\n", __func__);
+    log("%s: loading OpenVINO model from '%s'\n", __func__, path_encoder.c_str());
+    log("%s: first run on a device may take a while ...\n", __func__);
 
     ctx->state->ctx_openvino = whisper_openvino_init(path_encoder.c_str(), device, path_cache.c_str());
     if (!ctx->state->ctx_openvino) {
-        fprintf(stderr, "%s: failed to init OpenVINO encoder from '%s'\n", __func__, path_encoder.c_str());
+        log("%s: failed to init OpenVINO encoder from '%s'\n", __func__, path_encoder.c_str());
         return 1;
     } else {
-        fprintf(stderr, "%s: OpenVINO model loaded\n", __func__);
+        log("%s: OpenVINO model loaded\n", __func__);
     }
 
     return 0;
@@ -2794,11 +2810,11 @@ int whisper_ctx_init_openvino_encoder(
 
 struct whisper_context * whisper_init_from_file_no_state(const char * path_model) {
-    fprintf(stderr, "%s: loading model from '%s'\n", __func__, path_model);
+    log("%s: loading model from '%s'\n", __func__, path_model);
log("%s: loading model from '%s'\n", __func__, path_model); auto fin = std::ifstream(path_model, std::ios::binary); if (!fin) { - fprintf(stderr, "%s: failed to open '%s'\n", __func__, path_model); + log("%s: failed to open '%s'\n", __func__, path_model); return nullptr; } @@ -2840,7 +2856,7 @@ struct whisper_context * whisper_init_from_buffer_no_state(void * buffer, size_t buf_context ctx = { reinterpret_cast(buffer), buffer_size, 0 }; - fprintf(stderr, "%s: loading model from buffer\n", __func__); + log("%s: loading model from buffer\n", __func__); whisper_model_loader loader = {}; @@ -2875,7 +2891,7 @@ struct whisper_context * whisper_init_no_state(struct whisper_model_loader * loa if (!whisper_model_load(loader, *ctx)) { loader->close(loader->context); - fprintf(stderr, "%s: failed to load model\n", __func__); + log("%s: failed to load model\n", __func__); delete ctx; return nullptr; } @@ -2980,7 +2996,7 @@ void whisper_free_params(struct whisper_full_params * params) { int whisper_pcm_to_mel_with_state(struct whisper_context * ctx, struct whisper_state * state, const float * samples, int n_samples, int n_threads) { if (!log_mel_spectrogram(*state, samples, n_samples, WHISPER_SAMPLE_RATE, WHISPER_N_FFT, WHISPER_HOP_LENGTH, WHISPER_N_MEL, n_threads, ctx->model.filters, false, state->mel)) { - fprintf(stderr, "%s: failed to compute mel spectrogram\n", __func__); + log("%s: failed to compute mel spectrogram\n", __func__); return -1; } @@ -2994,7 +3010,7 @@ int whisper_pcm_to_mel(struct whisper_context * ctx, const float * samples, int // same as whisper_pcm_to_mel, but applies a Phase Vocoder to speed up the audio x2 int whisper_pcm_to_mel_phase_vocoder_with_state(struct whisper_context * ctx, struct whisper_state * state, const float * samples, int n_samples, int n_threads) { if (!log_mel_spectrogram(*state, samples, n_samples, WHISPER_SAMPLE_RATE, 2 * WHISPER_N_FFT, 2 * WHISPER_HOP_LENGTH, WHISPER_N_MEL, n_threads, ctx->model.filters, true, state->mel)) { - fprintf(stderr, "%s: failed to compute mel spectrogram\n", __func__); + log("%s: failed to compute mel spectrogram\n", __func__); return -1; } @@ -3013,7 +3029,7 @@ int whisper_set_mel_with_state( int n_len, int n_mel) { if (n_mel != WHISPER_N_MEL) { - fprintf(stderr, "%s: invalid number of mel bands: %d (expected %d)\n", __func__, n_mel, WHISPER_N_MEL); + log("%s: invalid number of mel bands: %d (expected %d)\n", __func__, n_mel, WHISPER_N_MEL); return -1; } @@ -3037,7 +3053,7 @@ int whisper_set_mel( int whisper_encode_with_state(struct whisper_context * ctx, struct whisper_state * state, int offset, int n_threads) { if (!whisper_encode_internal(*ctx, *state, offset, n_threads)) { - fprintf(stderr, "%s: failed to eval\n", __func__); + log("%s: failed to eval\n", __func__); return -1; } @@ -3046,7 +3062,7 @@ int whisper_encode_with_state(struct whisper_context * ctx, struct whisper_state int whisper_encode(struct whisper_context * ctx, int offset, int n_threads) { if (!whisper_encode_internal(*ctx, *ctx->state, offset, n_threads)) { - fprintf(stderr, "%s: failed to eval\n", __func__); + log("%s: failed to eval\n", __func__); return -1; } @@ -3057,7 +3073,7 @@ int whisper_decode_with_state(struct whisper_context * ctx, struct whisper_state const int selected_decoder_id = 0; if (!whisper_decode_internal(*ctx, *state, state->decoders[selected_decoder_id], tokens, n_tokens, n_past, n_threads)) { - fprintf(stderr, "%s: failed to eval\n", __func__); + log("%s: failed to eval\n", __func__); return 1; } @@ -3069,13 +3085,13 @@ int 
     const int selected_decoder_id = 0;
 
     if (ctx->state == nullptr) {
-        fprintf(stderr, "%s: ERROR state was not loaded.\n", __func__);
+        log("%s: ERROR state was not loaded.\n", __func__);
         return false;
     }
 
     if (!whisper_decode_internal(*ctx, *ctx->state, ctx->state->decoders[selected_decoder_id], tokens, n_tokens, n_past, n_threads)) {
-        fprintf(stderr, "%s: failed to eval\n", __func__);
+        log("%s: failed to eval\n", __func__);
         return 1;
     }
@@ -3086,7 +3102,7 @@ int whisper_tokenize(struct whisper_context * ctx, const char * text, whisper_to
     const auto res = tokenize(ctx->vocab, text);
 
     if (n_max_tokens < (int) res.size()) {
-        fprintf(stderr, "%s: too many resulting tokens: %d (max %d)\n", __func__, (int) res.size(), n_max_tokens);
+        log("%s: too many resulting tokens: %d (max %d)\n", __func__, (int) res.size(), n_max_tokens);
         return -1;
     }
@@ -3114,7 +3130,7 @@ int whisper_lang_id(const char * lang) {
             }
         }
 
-        fprintf(stderr, "%s: unknown language '%s'\n", __func__, lang);
+        log("%s: unknown language '%s'\n", __func__, lang);
         return -1;
     }
     return g_lang.at(lang).first;
@@ -3127,7 +3143,7 @@ const char * whisper_lang_str(int id) {
         }
     }
 
-    fprintf(stderr, "%s: unknown language id %d\n", __func__, id);
+    log("%s: unknown language id %d\n", __func__, id);
     return nullptr;
 }
@@ -3140,25 +3156,25 @@ int whisper_lang_auto_detect_with_state(
     const int seek = offset_ms/10;
 
     if (seek < 0) {
-        fprintf(stderr, "%s: offset %dms is before the start of the audio\n", __func__, offset_ms);
+        log("%s: offset %dms is before the start of the audio\n", __func__, offset_ms);
         return -1;
     }
 
     if (seek >= state->mel.n_len_org) {
-        fprintf(stderr, "%s: offset %dms is past the end of the audio (%dms)\n", __func__, offset_ms, state->mel.n_len_org*10);
+        log("%s: offset %dms is past the end of the audio (%dms)\n", __func__, offset_ms, state->mel.n_len_org*10);
        return -2;
     }
 
     // run the encoder
     if (whisper_encode_with_state(ctx, state, seek, n_threads) != 0) {
-        fprintf(stderr, "%s: failed to encode\n", __func__);
+        log("%s: failed to encode\n", __func__);
         return -6;
     }
 
     const std::vector<whisper_token> prompt = { whisper_token_sot(ctx) };
 
     if (whisper_decode_with_state(ctx, state, prompt.data(), prompt.size(), 0, n_threads) != 0) {
-        fprintf(stderr, "%s: failed to decode\n", __func__);
+        log("%s: failed to decode\n", __func__);
         return -7;
     }
@@ -3359,21 +3375,21 @@ whisper_token whisper_token_transcribe(struct whisper_context * ctx) {
 
 void whisper_print_timings(struct whisper_context * ctx) {
     const int64_t t_end_us = ggml_time_us();
 
-    fprintf(stderr, "\n");
-    fprintf(stderr, "%s:     load time = %8.2f ms\n", __func__, ctx->t_load_us / 1000.0f);
+    log("\n");
+    log("%s:     load time = %8.2f ms\n", __func__, ctx->t_load_us / 1000.0f);
     if (ctx->state != nullptr) {
 
         const int32_t n_sample = std::max(1, ctx->state->n_sample);
         const int32_t n_encode = std::max(1, ctx->state->n_encode);
         const int32_t n_decode = std::max(1, ctx->state->n_decode);
 
-        fprintf(stderr, "%s:     fallbacks = %3d p / %3d h\n", __func__, ctx->state->n_fail_p, ctx->state->n_fail_h);
-        fprintf(stderr, "%s:      mel time = %8.2f ms\n", __func__, ctx->state->t_mel_us / 1000.0f);
-        fprintf(stderr, "%s:   sample time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3f * ctx->state->t_sample_us, n_sample, 1e-3f * ctx->state->t_sample_us / n_sample);
-        fprintf(stderr, "%s:   encode time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3f * ctx->state->t_encode_us, n_encode, 1e-3f * ctx->state->t_encode_us / n_encode);
-        fprintf(stderr, "%s:   decode time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3f * ctx->state->t_decode_us, n_decode, 1e-3f * ctx->state->t_decode_us / n_decode);
fprintf(stderr, "%s: decode time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3f * ctx->state->t_decode_us, n_decode, 1e-3f * ctx->state->t_decode_us / n_decode); + log("%s: fallbacks = %3d p / %3d h\n", __func__, ctx->state->n_fail_p, ctx->state->n_fail_h); + log("%s: mel time = %8.2f ms\n", __func__, ctx->state->t_mel_us / 1000.0f); + log("%s: sample time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3f * ctx->state->t_sample_us, n_sample, 1e-3f * ctx->state->t_sample_us / n_sample); + log("%s: encode time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3f * ctx->state->t_encode_us, n_encode, 1e-3f * ctx->state->t_encode_us / n_encode); + log("%s: decode time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3f * ctx->state->t_decode_us, n_decode, 1e-3f * ctx->state->t_decode_us / n_decode); } - fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (t_end_us - ctx->t_start_us)/1000.0f); + log("%s: total time = %8.2f ms\n", __func__, (t_end_us - ctx->t_start_us)/1000.0f); } void whisper_reset_timings(struct whisper_context * ctx) { @@ -3697,7 +3713,7 @@ static void whisper_process_logits( const bool last_was_timestamp = tokens_cur.size() > 0 && tokens_cur.back().id >= vocab.token_beg; const bool penultimate_was_timestamp = tokens_cur.size() < 2 || tokens_cur[tokens_cur.size() - 2].id >= vocab.token_beg; - //fprintf(stderr, "last_was_timestamp=%d penultimate_was_timestamp=%d\n", last_was_timestamp, penultimate_was_timestamp); + //log("last_was_timestamp=%d penultimate_was_timestamp=%d\n", last_was_timestamp, penultimate_was_timestamp); if (last_was_timestamp) { if (penultimate_was_timestamp) { @@ -3773,7 +3789,7 @@ static void whisper_process_logits( const float max_text_token_logprob = *std::max_element(logprobs.begin(), logprobs.begin() + vocab.token_beg); - //fprintf(stderr, "timestamp_logprob=%f max_text_token_logprob=%f\n", timestamp_logprob, max_text_token_logprob); + //log("timestamp_logprob=%f max_text_token_logprob=%f\n", timestamp_logprob, max_text_token_logprob); if (timestamp_logprob > max_text_token_logprob) { for (int i = 0; i < vocab.token_beg; ++i) { @@ -4022,12 +4038,12 @@ int whisper_full_with_state( // compute log mel spectrogram if (params.speed_up) { if (whisper_pcm_to_mel_phase_vocoder_with_state(ctx, state, samples, n_samples, params.n_threads) != 0) { - fprintf(stderr, "%s: failed to compute log mel spectrogram\n", __func__); + log("%s: failed to compute log mel spectrogram\n", __func__); return -1; } } else { if (whisper_pcm_to_mel_with_state(ctx, state, samples, n_samples, params.n_threads) != 0) { - fprintf(stderr, "%s: failed to compute log mel spectrogram\n", __func__); + log("%s: failed to compute log mel spectrogram\n", __func__); return -2; } } @@ -4038,13 +4054,13 @@ int whisper_full_with_state( const auto lang_id = whisper_lang_auto_detect_with_state(ctx, state, 0, params.n_threads, probs.data()); if (lang_id < 0) { - fprintf(stderr, "%s: failed to auto-detect language\n", __func__); + log("%s: failed to auto-detect language\n", __func__); return -3; } state->lang_id = lang_id; params.language = whisper_lang_str(lang_id); - fprintf(stderr, "%s: auto-detected language: %s (p = %f)\n", __func__, params.language, probs[whisper_lang_id(params.language)]); + log("%s: auto-detected language: %s (p = %f)\n", __func__, params.language, probs[whisper_lang_id(params.language)]); if (params.detect_language) { return 0; } @@ -4101,7 +4117,7 @@ int whisper_full_with_state( if (decoder.kv_self.ctx == nullptr) { 
                 decoder.kv_self = state->decoders[0].kv_self;
                 if (!kv_cache_reinit(decoder.kv_self)) {
-                    fprintf(stderr, "%s: kv_cache_reinit() failed for self-attention, decoder %d\n", __func__, j);
+                    log("%s: kv_cache_reinit() failed for self-attention, decoder %d\n", __func__, j);
                     return -4;
                 }
@@ -4145,7 +4161,7 @@ int whisper_full_with_state(
     // overwrite audio_ctx, max allowed is hparams.n_audio_ctx
     if (params.audio_ctx > whisper_n_audio_ctx(ctx)) {
-        fprintf(stderr, "%s: audio_ctx is larger than the maximum allowed (%d > %d)\n", __func__, params.audio_ctx, whisper_n_audio_ctx(ctx));
+        log("%s: audio_ctx is larger than the maximum allowed (%d > %d)\n", __func__, params.audio_ctx, whisper_n_audio_ctx(ctx));
         return -5;
     }
     state->exp_n_audio_ctx = params.audio_ctx;
@@ -4189,8 +4205,9 @@ int whisper_full_with_state(
     // main loop
     while (true) {
-        const int progress_cur = (100*(seek - seek_start))/(seek_end - seek_start);
         if (params.progress_callback) {
+            const int progress_cur = (100*(seek - seek_start))/(seek_end - seek_start);
+
             params.progress_callback(
                 ctx, ctx->state, progress_cur, params.progress_callback_user_data);
         }
@@ -4202,14 +4219,14 @@ int whisper_full_with_state(
         if (params.encoder_begin_callback) {
             if (params.encoder_begin_callback(ctx, state, params.encoder_begin_callback_user_data) == false) {
-                fprintf(stderr, "%s: encoder_begin_callback returned false - aborting\n", __func__);
+                log("%s: encoder_begin_callback returned false - aborting\n", __func__);
                 break;
             }
         }
 
         // encode audio features starting at offset seek
         if (!whisper_encode_internal(*ctx, *state, seek, params.n_threads)) {
-            fprintf(stderr, "%s: failed to encode\n", __func__);
+            log("%s: failed to encode\n", __func__);
             return -6;
         }
@@ -4292,7 +4309,7 @@ int whisper_full_with_state(
                 WHISPER_PRINT_DEBUG("\n\n");
 
                 if (!whisper_decode_internal(*ctx, *state, state->decoders[0], prompt.data(), prompt.size(), 0, params.n_threads)) {
-                    fprintf(stderr, "%s: failed to decode\n", __func__);
+                    log("%s: failed to decode\n", __func__);
                     return -7;
                 }
@@ -4530,7 +4547,7 @@ int whisper_full_with_state(
                     //WHISPER_PRINT_DEBUG("%s: decoder %d: token %d, kv_self.n %d, seek_delta %d\n", __func__, j, decoder.tokens_tmp[0], decoder.kv_self.n, decoder.seek_delta);
 
                     if (!whisper_decode_internal(*ctx, *state, decoder, decoder.tokens_tmp.data(), decoder.tokens_tmp.size(), decoder.kv_self.n, params.n_threads)) {
-                        fprintf(stderr, "%s: failed to decode\n", __func__);
+                        log("%s: failed to decode\n", __func__);
                        return -8;
                    }
@@ -4852,12 +4869,12 @@ int whisper_full_parallel(
     ctx->state->t_decode_us /= n_processors;
 
     // print information about the audio boundaries
-    fprintf(stderr, "\n");
-    fprintf(stderr, "%s: the audio has been split into %d chunks at the following times:\n", __func__, n_processors);
+    log("\n");
+    log("%s: the audio has been split into %d chunks at the following times:\n", __func__, n_processors);
     for (int i = 0; i < n_processors - 1; ++i) {
-        fprintf(stderr, "%s: split %d - %s\n", __func__, (i + 1), to_timestamp(100*((i + 1)*n_samples_per_processor)/WHISPER_SAMPLE_RATE + offset_t).c_str());
+        log("%s: split %d - %s\n", __func__, (i + 1), to_timestamp(100*((i + 1)*n_samples_per_processor)/WHISPER_SAMPLE_RATE + offset_t).c_str());
     }
-    fprintf(stderr, "%s: the transcription quality may be degraded near these boundaries\n", __func__);
+    log("%s: the transcription quality may be degraded near these boundaries\n", __func__);
 
     return ret;
 }
@@ -5217,7 +5234,7 @@ static void whisper_exp_compute_token_level_timestamps(
     const int n_samples = state.energy.size();
 
     if (n_samples == 0) {
-        fprintf(stderr, "%s: no signal data available\n", __func__);
+        log("%s: no signal data available\n", __func__);
         return;
     }
@@ -5437,3 +5454,7 @@
 //        }
 //    }
 }
+
+void whisper_set_log_callback(whisper_log_callback callback) {
+    whisper_log = callback;
+}
diff --git a/whisper.h b/whisper.h
index 83af11b..89172e7 100644
--- a/whisper.h
+++ b/whisper.h
@@ -517,6 +517,11 @@ extern "C" {
     WHISPER_API int          whisper_bench_ggml_mul_mat    (int n_threads);
     WHISPER_API const char * whisper_bench_ggml_mul_mat_str(int n_threads);
 
+    // Control logging output; default behavior is to print to stderr
+
+    typedef void (*whisper_log_callback)(const char * line);
+    WHISPER_API void whisper_set_log_callback(whisper_log_callback callback);
+
 #ifdef __cplusplus
 }
 #endif
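
Two review notes on the new log() helper, with a sketch of a possible follow-up (not part of this patch): messages are formatted into a fixed 1024-byte stack buffer, so anything longer is silently truncated by vsnprintf, and va_start is never paired with the va_end that the C standard requires. A corrected version could look like this:

    static void log(const char * fmt, ...) {
        if (!whisper_log) return;
        char buf[1024];
        va_list args;
        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args); // truncates output beyond 1023 characters
        va_end(args);                           // required pairing for va_start
        whisper_log(buf);
    }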
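
For callers, a minimal usage sketch of the new API (the file name, the my_log helper, and the model path below are illustrative, not part of the patch): install the callback before loading a model so that load-time messages are captured too, or pass nullptr to silence the library entirely, since log() returns early when the callback is null.

    #include "whisper.h"
    #include <cstdio>

    static FILE * g_log_file = nullptr;

    // receives each fully formatted log line, including any trailing newline
    static void my_log(const char * line) {
        if (g_log_file) {
            fputs(line, g_log_file);
        }
    }

    int main() {
        g_log_file = fopen("whisper.log", "w");

        whisper_set_log_callback(my_log);     // route all library logs to the file
        // whisper_set_log_callback(nullptr); // would silence logging entirely

        struct whisper_context * ctx = whisper_init_from_file("ggml-base.en.bin");
        if (ctx) {
            whisper_free(ctx);
        }

        if (g_log_file) {
            fclose(g_log_file);
        }
        return 0;
    }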