cuda : fix LLAMA_CUDA_F16 (llama/5262)

pull/1768/merge
slaren 2024-02-01 18:30:17 +01:00 committed by Georgi Gerganov
parent 9b735cea77
commit 1b5bb7792e
No known key found for this signature in database
GPG Key ID: 449E073F9DC10735
1 changed file with 3 additions and 3 deletions

View File

@@ -8657,9 +8657,9 @@ static void ggml_cuda_op_dequantize_mul_mat_vec(
         if (src1_convert_f16) {
             src1_dfloat = src1_dfloat_a.alloc(ne00);
-            ggml_cpy_f32_f16_cuda((const char *) src1_ddf_i, (char *) src1_dfloat, ne00,
-                ne00, 1, sizeof(float), 0, 0,
-                ne00, 1, sizeof(half), 0, 0, stream);
+            const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type);
+            GGML_ASSERT(to_fp16_cuda != nullptr);
+            to_fp16_cuda(src1_ddf_i, src1_dfloat, ne00, stream);
         }
 #else
         const dfloat * src1_dfloat = (const dfloat *) src1_ddf_i; // dfloat == float, no conversion