talk-llama : fix n_gpu_layers usage again (#1442)

pull/1445/head
Jhen-Jie Hong 2023-11-07 16:51:27 +08:00 committed by GitHub
parent 0c91aef2d8
commit 75dc800d21
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 1 addition and 1 deletion

View File

@@ -267,7 +267,7 @@ int main(int argc, char ** argv) {
auto lmparams = llama_model_default_params();
if (!params.use_gpu) {
-        lcparams.n_gpu_layers = 0;
+        lmparams.n_gpu_layers = 0;
}
struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);