llama : assume tied weights if lm_head/output weights are missing (#5824)

This adds support for model configurations that set "tie_word_embeddings" to true: such models store no separate lm_head/output tensor, so the loader falls back to the token embedding weights (sketched below).

Co-authored-by: Don Mahurin <2797413+dmahurin@users.noreply.github.com>
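Background: with tied word embeddings, the lm_head projection that maps the final hidden state to vocabulary logits reuses the token embedding matrix, so the converted model file stores only the single [n_vocab, n_embd] tensor. A minimal standalone sketch of the idea (illustrative only, not llama.cpp code):

    #include <cstddef>
    #include <vector>

    // embed is row-major [n_vocab x n_embd]; under tying, row v is both the
    // embedding of token v and row v of the output (lm_head) projection.
    std::vector<float> logits_tied(const std::vector<float> & embed,
                                   const std::vector<float> & hidden,
                                   size_t n_vocab, size_t n_embd) {
        std::vector<float> logits(n_vocab, 0.0f);
        for (size_t v = 0; v < n_vocab; ++v) {
            for (size_t i = 0; i < n_embd; ++i) {
                logits[v] += embed[v*n_embd + i] * hidden[i];
            }
        }
        return logits;
    }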
commit e457fb3540
parent af37fd8b30
Author: Don Mahurin
Date:   2024-03-08 02:41:50 -08:00 (committed by GitHub)

@@ -3888,7 +3888,13 @@ static bool llm_load_tensors(
                     {
                         model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                         if (model.arch != LLM_ARCH_MINICPM){
-                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
+                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
+                            // if output is NULL, init from the input tok embed
+                            if (model.output == NULL) {
+                                model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+                                ml.n_created--; // artificial tensor
+                                ml.size_data += ggml_nbytes(model.output);
+                            }
                         }
                     }
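The change hinges on create_tensor's optional-tensor behavior: the trailing false (required = false) makes a name that is absent from the file return NULL instead of raising an error, and the caller then falls back to the token embedding. A rough standalone sketch of this pattern, with hypothetical Loader/Tensor types standing in for llama_model_loader's internals (the real loader's bookkeeping differs in detail):

    #include <cassert>
    #include <cstddef>
    #include <stdexcept>
    #include <string>
    #include <unordered_map>

    struct Tensor { size_t nbytes; };

    struct Loader {
        std::unordered_map<std::string, Tensor> tensors; // tensors present in the file
        int    n_created = 0;   // tensors handed out; checked against the file's count
        size_t size_data = 0;   // bytes of tensor data scheduled for loading

        // With required == false, a missing name yields NULL instead of an error.
        Tensor * create_tensor(const std::string & name, bool required = true) {
            auto it = tensors.find(name);
            if (it == tensors.end()) {
                if (required) { throw std::runtime_error("missing tensor: " + name); }
                return nullptr;
            }
            n_created++;
            size_data += it->second.nbytes;
            return &it->second;
        }
    };

    int main() {
        Loader ml;
        ml.tensors["token_embd.weight"] = Tensor{256000};
        // "output.weight" is deliberately absent, as with tie_word_embeddings.

        Tensor * tok_embd = ml.create_tensor("token_embd.weight");
        Tensor * output   = ml.create_tensor("output.weight", /*required=*/false);
        if (output == nullptr) {
            // tied weights: reuse the token embedding as the output projection
            output = ml.create_tensor("token_embd.weight");
            ml.n_created--; // artificial tensor: same file entry, counted once
        }
        // the loader's end-of-load sanity check still balances
        assert(ml.n_created == (int) ml.tensors.size());
        (void) tok_embd; (void) output;
        return 0;
    }

In this sketch size_data ends up counting the tied data twice, once per destination tensor, which is also why the real patch bumps ml.size_data by ggml_nbytes(model.output) after creating the artificial tensor.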