From 98ed16557432d7a5179c57eddcc3a08a7ae6d54d Mon Sep 17 00:00:00 2001
From: Robert Sung-wook Shin
Date: Sat, 10 Jun 2023 01:24:40 +0900
Subject: [PATCH] OpenCL: Add release memory (#1741)

* Add opencl release memory

* Rename function name
---
 ggml-opencl.cpp | 9 +++++++++
 ggml-opencl.h   | 2 ++
 llama.cpp       | 6 +++++-
 3 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp
index 81a975cf8..7b6daf4a8 100644
--- a/ggml-opencl.cpp
+++ b/ggml-opencl.cpp
@@ -662,6 +662,15 @@ static void ggml_cl_pool_free(cl_mem mem, size_t size) {
     clReleaseMemObject(mem);
 }
 
+void ggml_cl_free_data(const struct ggml_tensor* tensor) {
+    if (tensor->backend != GGML_BACKEND_GPU) {
+        return;
+    }
+
+    cl_mem mem = (cl_mem)tensor->data;
+    clReleaseMemObject(mem);
+}
+
 static cl_int ggml_cl_h2d_tensor_2d(cl_command_queue queue, cl_mem dst, size_t offset, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, cl_event* ev) {
     cl_int err;
     const uint64_t ne0 = src->ne[0];
diff --git a/ggml-opencl.h b/ggml-opencl.h
index c850bb8ad..bf95e5cd0 100644
--- a/ggml-opencl.h
+++ b/ggml-opencl.h
@@ -16,6 +16,8 @@ void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor
 void * ggml_cl_host_malloc(size_t size);
 void ggml_cl_host_free(void * ptr);
 
+void ggml_cl_free_data(const struct ggml_tensor* tensor);
+
 void ggml_cl_transform_tensor(struct ggml_tensor * tensor);
 void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, size_t offset);
 
diff --git a/llama.cpp b/llama.cpp
index 16d6f6ef1..f40c5afa2 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -210,7 +210,11 @@ struct llama_model {
         for (size_t i = 0; i < tensors_by_name.size(); ++i) {
             ggml_cuda_free_data(tensors_by_name[i].second);
         }
-#endif // GGML_USE_CUBLAS
+#elif defined(GGML_USE_CLBLAST)
+        for (size_t i = 0; i < tensors_by_name.size(); ++i) {
+            ggml_cl_free_data(tensors_by_name[i].second);
+        }
+#endif
     }
 };
 