From 2a4bcbacead886996f175f33479d1d874a3e577f Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Fri, 13 Oct 2023 12:33:16 +0200 Subject: [PATCH] llama : remove n_threads from llama_decode_internal (#3614) This commit removes `n_threads` from the `llama_decode_internal` function's doc comment as it does not exist anymore. It looks like this parameter was removed in commit 16bc66d9479edd5ee12ec734973554d4493c5dfa ("llama.cpp : split llama_context_params into model and context params"). Signed-off-by: Daniel Bevenius --- llama.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index 7ed872237..2cd2dad7f 100644 --- a/llama.cpp +++ b/llama.cpp @@ -5721,7 +5721,6 @@ static struct ggml_cgraph * llama_build_graph( // // - lctx: llama context // - batch: batch to evaluate -// - n_threads: number of threads to use // // return 0 on success // return positive int on warning