diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index a5fb65548..7313d06a0 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -799,7 +799,7 @@ int main(int argc, char ** argv) {
                 }
 
                 const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
-                const auto line_inp = ::llama_tokenize(ctx, buffer, false, false);
+                const auto line_inp = ::llama_tokenize(ctx, buffer,              false, false);
                 const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);
 
                 LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp));
diff --git a/llama.cpp b/llama.cpp
index 82b7638ae..37df88779 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2327,13 +2327,13 @@ static void llm_load_vocab(
         }
 
         if (special_tokens_definition_mismatch || special_tokens_count_from_verification != special_tokens_count_by_type) {
-            fprintf(stderr, "%s: warning: Mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
+            LLAMA_LOG_WARN("%s: mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
                 __func__,
                 special_tokens_count_from_verification, vocab.id_to_token.size(),
                 special_tokens_count_by_type, vocab.id_to_token.size()
             );
         } else {
-            fprintf(stderr, "%s: Special tokens definition check successful ( %u/%zu ).\n",
+            LLAMA_LOG_INFO("%s: special tokens definition check successful ( %u/%zu ).\n",
                 __func__,
                 special_tokens_count_from_verification, vocab.id_to_token.size()
             );