llama : fix non-quantization of expert gating tensors (#5754)

This reverts a single line from #5475
compilade 2024-02-28 03:52:56 -05:00 committed by GitHub
parent 177628bfd8
commit adcb12a9ba

@@ -11162,7 +11162,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         quantize &= !params->only_copy;
 
         // do not quantize expert gating tensors
-        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_FFN_GATE_INP, "weight");
+        // NOTE: can't use LLM_TN here because the layer number is not known
+        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
 
         // do not quantize positional embeddings and token types (BERT)
         quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD, "weight");
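For context, the NOTE in the patch points at why the reverted comparison failed: the expert gating tensors are per-layer tensors, so their names carry a layer index, and an exact comparison against a name built without that index never matches, which let them be quantized anyway. The following is a minimal standalone sketch (not part of the patch) of the substring check, assuming tensor names of the usual blk.N.ffn_gate_inp.weight form; the example names are illustrative only.

// sketch: the substring check exempts gating tensors regardless of layer index
#include <cstdio>
#include <string>

int main() {
    const std::string names[] = {
        "blk.0.ffn_gate_inp.weight",   // expert gating tensor, layer 0 (example name)
        "blk.31.ffn_gate_inp.weight",  // expert gating tensor, layer 31 (example name)
        "blk.0.ffn_down.weight",       // ordinary FFN tensor, should still be quantized
    };

    for (const std::string & name : names) {
        bool quantize = true;

        // same check as the patch: skip quantization when the name contains
        // "ffn_gate_inp.weight", whatever the layer number is
        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;

        printf("%-28s -> quantize = %s\n", name.c_str(), quantize ? "true" : "false");
    }
    return 0;
}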