From 3d59ec5935ea1d33e9d51060a8dd737169b9b89b Mon Sep 17 00:00:00 2001
From: Howard Su
Date: Sat, 17 Jun 2023 23:46:15 +0800
Subject: [PATCH] ggml : fix warnings under MSVC (#1908)

---
 ggml-cuda.cu    | 4 ++++
 ggml-opencl.cpp | 4 ++++
 llama.cpp       | 2 +-
 3 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 7edd1a9f8..fed2a7ce1 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -13,6 +13,10 @@
 #include "ggml-cuda.h"
 #include "ggml.h"
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
 static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size");
 
 #define CUDA_CHECK(err) \
diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp
index 1d4db96ee..95f4cec6d 100644
--- a/ggml-opencl.cpp
+++ b/ggml-opencl.cpp
@@ -15,6 +15,10 @@
 
 #include "ggml.h"
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
 #define CL_DMMV_BLOCK_SIZE 32
 
 #define MULTILINE_QUOTE(...) #__VA_ARGS__
diff --git a/llama.cpp b/llama.cpp
index 81f047ed2..a50846f71 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1253,7 +1253,7 @@ static void llama_model_load_internal(
             vram_scratch = n_batch * MB;
             ggml_cuda_set_scratch_size(vram_scratch);
             if (n_gpu_layers > 0) {
-                fprintf(stderr, "%s: allocating batch_size x 1 MB = %ld MB VRAM for the scratch buffer\n",
+                fprintf(stderr, "%s: allocating batch_size x 1 MB = %zd MB VRAM for the scratch buffer\n",
                         __func__, vram_scratch / MB);
            }
         }
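
Editor's note: the patch makes two kinds of fix. First, it suppresses MSVC
warnings C4244 and C4267, which both flag implicit integer/float narrowing
("conversion from 'X' to 'Y', possible loss of data"); the project silences
them file-wide in the GPU backends rather than annotating every conversion
with a cast. Second, it corrects the printf format for a size_t value:
"%ld" expects a long, which is only 32 bits on 64-bit Windows (LLP64), so
it mismatches the 64-bit size_t argument; the 'z' length modifier pairs the
specifier with size_t's width. Below is a minimal standalone sketch of both
points, not part of the patch; the file name and values are illustrative.

    // msvc_warning_demo.cpp -- illustrative sketch, not from the patch.
    // Build with MSVC:  cl /W3 msvc_warning_demo.cpp
    // With the pragma in place the narrowing assignments compile silently;
    // remove the pragma block to see C4244 and C4267 reported at /W3.
    #include <cstdio>
    #include <vector>

    #if defined(_MSC_VER)
    // Same suppression the patch adds to ggml-cuda.cu and ggml-opencl.cpp.
    #pragma warning(disable: 4244 4267) // possible loss of data
    #endif

    int main() {
        std::vector<int> v = {1, 2, 3};

        // C4267: size_t (64-bit on x64 Windows) narrowed to int.
        int n = v.size();

        // C4244: double literal narrowed to float.
        float f = 3.14159265358979;

        // The llama.cpp fix: print a size_t with the 'z' modifier instead
        // of "%ld". (Strictly, "%zu" matches the unsigned size_t; "%zd"
        // as used in the patch names the signed type of the same width.)
        size_t vram_scratch = 512u * 1024 * 1024; // hypothetical value
        printf("n=%d f=%f scratch=%zu MB\n", n, f,
               vram_scratch / (1024 * 1024));
        return 0;
    }

On a 64-bit Linux/macOS (LP64) target, long happens to be 64 bits, which is
why the original "%ld" went unnoticed until the code was built with MSVC.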