From 10f19c1121068ce3dab9bece03a8b9caaea2db36 Mon Sep 17 00:00:00 2001
From: eiery <19350831+eiery@users.noreply.github.com>
Date: Sat, 22 Apr 2023 04:27:05 -0400
Subject: [PATCH] llama : have n_batch default to 512 (#1091)

* set default n_batch to 512 when using BLAS

* spacing

* alternate implementation of setting different n_batch for BLAS

* set n_batch to 512 for all cases
---
 examples/common.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/common.h b/examples/common.h
index cbbc2dfab..0470368d5 100644
--- a/examples/common.h
+++ b/examples/common.h
@@ -20,7 +20,7 @@ struct gpt_params {
     int32_t repeat_last_n = 64;   // last n tokens to penalize
     int32_t n_parts       = -1;   // amount of model parts (-1 = determine from model dimensions)
     int32_t n_ctx         = 512;  // context size
-    int32_t n_batch       = 8;    // batch size for prompt processing
+    int32_t n_batch       = 512;  // batch size for prompt processing (must be >=32 to use BLAS)
     int32_t n_keep        = 0;    // number of tokens to keep from initial prompt
 
     // sampling parameters
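
Note on the `>=32` constraint referenced in the new comment: ggml only dispatches a matrix multiplication to BLAS when every dimension involved is large enough to amortize the dequantization and library-call overhead. The sketch below illustrates that kind of gate; the function name and the `ne*` parameter names follow ggml's conventions but are assumptions here, not the verbatim upstream code.

```c
#include <stdbool.h>
#include <stdint.h>

// Minimal sketch of a BLAS dispatch gate for a mul-mat op (assumed
// shape, not the verbatim upstream implementation). ne0 and ne1 are
// the output matrix dimensions; ne10 is the shared inner dimension.
static bool use_blas_for_mul_mat(int64_t ne0, int64_t ne1, int64_t ne10) {
    // Below 32 in any dimension, dequantizing the weights to float and
    // calling into BLAS costs more than it saves, so the plain ggml
    // kernels are used instead.
    return ne0 >= 32 && ne1 >= 32 && ne10 >= 32;
}
```

With the old default of `n_batch = 8`, the batch dimension of the prompt-processing matrix multiplications never reached 32, so even a BLAS-enabled build fell back to the scalar kernels during prompt ingestion. Defaulting to 512 makes the BLAS path the out-of-the-box behavior, while `n_batch` remains user-configurable.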