From afd220d9c665e4c19107120ace2f0cb742e28aa1 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 24 Mar 2023 17:21:01 +0200
Subject: [PATCH] Properly free llama_context on failure

---
 llama.cpp | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 5d56cc90e..cdb862828 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1432,16 +1432,16 @@ struct llama_context * llama_init_from_file(
     if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_parts, type_memory,
                           params.vocab_only)) {
         fprintf(stderr, "%s: failed to load model\n", __func__);
-        delete ctx;
+        llama_free(ctx);
         return nullptr;
     }
-
+
     if (params.use_mlock) {
         char *err;
         if (!ggml_mlock(ctx->model.ctx, &err)) {
             fprintf(stderr, "%s\n", err);
             free(err);
-            delete ctx;
+            llama_free(ctx);
             return nullptr;
         }
     }
@@ -1464,7 +1464,9 @@ struct llama_context * llama_init_from_file(
 }

 void llama_free(struct llama_context * ctx) {
-    ggml_free(ctx->model.ctx);
+    if (ctx->model.ctx) {
+        ggml_free(ctx->model.ctx);
+    }

     delete ctx;
 }
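
For reference, a minimal caller sketch (not part of the patch) of the contract this change establishes: llama_init_from_file now releases its own partially constructed context through llama_free on failure and returns nullptr, and the new null check in llama_free makes that safe even when model.ctx was never allocated. The program below is hypothetical; the model path is a placeholder, and it assumes the llama.h API as of this commit (llama_context_default_params, llama_init_from_file, llama_free).

    // caller_sketch.cpp -- hypothetical usage example, not from the patch
    #include <cstdio>

    #include "llama.h"

    int main(int argc, char ** argv) {
        if (argc < 2) {
            fprintf(stderr, "usage: %s <model-path>\n", argv[0]);
            return 1;
        }

        struct llama_context_params params = llama_context_default_params();

        // On failure this returns nullptr and, with this patch, has already
        // freed the partially initialized context internally; the caller
        // must not call llama_free on the null result.
        struct llama_context * ctx = llama_init_from_file(argv[1], params);
        if (ctx == nullptr) {
            fprintf(stderr, "failed to load model '%s'\n", argv[1]);
            return 1;
        }

        // ... run inference with ctx ...

        llama_free(ctx);
        return 0;
    }

Routing both failure paths through llama_free instead of a bare delete keeps cleanup in one place, so any members added to llama_context later are released consistently on both the error and the normal path.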