From dcb2ed48268e421baf25adc00d602dad0f415564 Mon Sep 17 00:00:00 2001
From: 0cc4m
Date: Sun, 4 Jun 2023 08:12:05 +0200
Subject: [PATCH] OpenCL: Fix duplication of layers in VRAM and RAM, add GPU mul kernel (#1653)

* Use events instead of clFinish, where possible
* OpenCL: Don't load gpu layers into RAM, add mul_f32 kernel
* Reduce queueing overhead for contiguous tensors by using single mul kernel call
* Adapt to #1612 cl_mem malloc changes
* Reduce code duplication between cuda and opencl branches
* Improve implementation
---
 ggml-opencl.cpp | 184 +++++++++++++++++++++++++++++++++++++++++++++---
 ggml-opencl.h   |   2 +
 ggml.c          |   7 ++
 llama.cpp       |  57 ++++++++-------
 4 files changed, 210 insertions(+), 40 deletions(-)

diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp
index 9a5cb0535..52ba3aaac 100644
--- a/ggml-opencl.cpp
+++ b/ggml-opencl.cpp
@@ -3,6 +3,7 @@
 #include <array>
 #include <atomic>
 #include <sstream>
+#include <vector>

 #define CL_TARGET_OPENCL_VERSION 110
 #include <clblast.h>
@@ -197,6 +198,18 @@ __kernel void KERNEL_NAME(__global X_TYPE* x, __local float* tmp, __global float
 }
 );

+std::string mul_template = MULTILINE_QUOTE(
+__kernel void KERNEL_NAME(__global TYPE* x, const int x_offset, __global TYPE* y, const int y_offset, __global TYPE* dst, const int dst_offset, const int ky) {
+    const int i = get_group_id(0)*get_local_size(0) + get_local_id(0);
+
+    if (i >= get_global_size(0)) {
+        return;
+    }
+
+    dst[dst_offset + i] = x[x_offset + i] * y[y_offset + i%ky];
+}
+);
+
 #define CL_CHECK(err) \
     do { \
         cl_int err_ = (err); \
@@ -239,6 +252,13 @@ std::array<std::string, 30> dequant_mul_mat_vec_str_values = {
     "convert_mul_mat_vec_f16", "half", "1", "1", "convert_f16"
 };

+std::array<std::string, 2> mul_str_keys = {
+    "KERNEL_NAME", "TYPE"
+};
+std::array<std::string, 2> mul_str_values = {
+    "mul_f32", "float"
+};
+
 std::string& replace(std::string& s, const std::string& from, const std::string& to) {
     size_t pos = 0;
     while ((pos = s.find(from, pos)) != std::string::npos) {
@@ -261,6 +281,13 @@ std::string generate_kernels() {
         src << dequant_kernel << '\n';
         src << dmmv_kernel << '\n';
     }
+    for (size_t i = 0; i < mul_str_values.size(); i += mul_str_keys.size()) {
+        std::string mul_kernel = mul_template;
+        for (size_t j = 0; j < mul_str_keys.size(); j++) {
+            replace(mul_kernel, mul_str_keys[j], mul_str_values[i + j]);
+        }
+        src << mul_kernel << '\n';
+    }
     return src.str();
 }

@@ -272,6 +299,7 @@ static cl_program program;
 static cl_kernel convert_row_f16_cl;
 static cl_kernel dequantize_row_q4_0_cl, dequantize_row_q4_1_cl, dequantize_row_q5_0_cl, dequantize_row_q5_1_cl, dequantize_row_q8_0_cl;
 static cl_kernel dequantize_mul_mat_vec_q4_0_cl, dequantize_mul_mat_vec_q4_1_cl, dequantize_mul_mat_vec_q5_0_cl, dequantize_mul_mat_vec_q5_1_cl, dequantize_mul_mat_vec_q8_0_cl, convert_mul_mat_vec_f16_cl;
+static cl_kernel mul_f32_cl;
 static bool fp16_support;

 static cl_program build_program_from_source(cl_context ctx, cl_device_id dev, const char* program_buffer) {
@@ -508,6 +536,9 @@ void ggml_cl_init(void) {
     CL_CHECK((dequantize_mul_mat_vec_q5_1_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q5_1", &err), err));
     CL_CHECK((dequantize_mul_mat_vec_q8_0_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q8_0", &err), err));
     CL_CHECK((convert_mul_mat_vec_f16_cl = clCreateKernel(program, "convert_mul_mat_vec_f16", &err), err));
+
+    // mul kernel
+    CL_CHECK((mul_f32_cl = clCreateKernel(program, "mul_f32", &err), err));
 }

 static cl_kernel* ggml_get_to_fp32_cl(ggml_type type) {
@@ -644,6 +675,98 @@ static cl_int ggml_cl_h2d_tensor_2d(cl_command_queue queue, cl_mem dst, size_t o
     return err;
 }

+static void ggml_cl_mul_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+    GGML_ASSERT(src1->backend == GGML_BACKEND_CL);
+    const int64_t ne00 = src0->ne[0];
+    const int64_t ne01 = src0->ne[1];
+    const int64_t ne02 = src0->ne[2];
+    const int64_t ne03 = src0->ne[3];
+    const int64_t ne0 = ne00 * ne01 * ne02 * ne03;
+    const int64_t ne10 = src1->ne[0];
+    const int64_t ne11 = src1->ne[1];
+    const int64_t ne12 = src1->ne[2];
+    const int64_t ne13 = src1->ne[3];
+    const int64_t nb10 = src1->nb[0];
+    const int nb2 = dst->nb[2];
+    const int nb3 = dst->nb[3];
+    size_t x_size;
+    size_t d_size;
+
+    cl_mem d_X = ggml_cl_pool_malloc(ne0 * sizeof(float), &x_size, CL_MEM_READ_ONLY); // src0
+    cl_mem d_Y = (cl_mem) src1->data; // src1 is already on device, broadcasted.
+    cl_mem d_D = ggml_cl_pool_malloc(ne0 * sizeof(float), &d_size, CL_MEM_WRITE_ONLY); // dst
+
+    for (int64_t i03 = 0; i03 < ne03; i03++) {
+        for (int64_t i02 = 0; i02 < ne02; i02++) {
+            const int i0 = i03*ne02 + i02;
+
+            cl_event ev;
+
+            // copy src0 to device
+            CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, i0, src0, i03, i02, &ev));
+
+            if (nb10 == sizeof(float)) {
+                // Contiguous, avoid overhead from queueing many kernel runs
+                const int64_t i13 = i03%ne13;
+                const int64_t i12 = i02%ne12;
+                const int i1 = i13*ne12*ne11 + i12*ne11;
+
+                cl_int x_offset = 0;
+                cl_int y_offset = i1*ne10;
+                cl_int d_offset = 0;
+
+                size_t global = ne00 * ne01;
+                cl_int ky = ne10;
+                CL_CHECK(clSetKernelArg(mul_f32_cl, 0, sizeof(cl_mem), &d_X));
+                CL_CHECK(clSetKernelArg(mul_f32_cl, 1, sizeof(cl_int), &x_offset));
+                CL_CHECK(clSetKernelArg(mul_f32_cl, 2, sizeof(cl_mem), &d_Y));
+                CL_CHECK(clSetKernelArg(mul_f32_cl, 3, sizeof(cl_int), &y_offset));
+                CL_CHECK(clSetKernelArg(mul_f32_cl, 4, sizeof(cl_mem), &d_D));
+                CL_CHECK(clSetKernelArg(mul_f32_cl, 5, sizeof(cl_int), &d_offset));
+                CL_CHECK(clSetKernelArg(mul_f32_cl, 6, sizeof(cl_int), &ky));
+                CL_CHECK(clEnqueueNDRangeKernel(queue, mul_f32_cl, 1, NULL, &global, NULL, 1, &ev, NULL));
+            } else {
+                for (int64_t i01 = 0; i01 < ne01; i01++) {
+                    const int64_t i13 = i03%ne13;
+                    const int64_t i12 = i02%ne12;
+                    const int64_t i11 = i01%ne11;
+                    const int i1 = i13*ne12*ne11 + i12*ne11 + i11;
+
+                    cl_int x_offset = i01*ne00;
+                    cl_int y_offset = i1*ne10;
+                    cl_int d_offset = i01*ne00;
+
+                    // compute
+                    size_t global = ne00;
+                    cl_int ky = ne10;
+                    CL_CHECK(clSetKernelArg(mul_f32_cl, 0, sizeof(cl_mem), &d_X));
+                    CL_CHECK(clSetKernelArg(mul_f32_cl, 1, sizeof(cl_int), &x_offset));
+                    CL_CHECK(clSetKernelArg(mul_f32_cl, 2, sizeof(cl_mem), &d_Y));
+                    CL_CHECK(clSetKernelArg(mul_f32_cl, 3, sizeof(cl_int), &y_offset));
+                    CL_CHECK(clSetKernelArg(mul_f32_cl, 4, sizeof(cl_mem), &d_D));
+                    CL_CHECK(clSetKernelArg(mul_f32_cl, 5, sizeof(cl_int), &d_offset));
+                    CL_CHECK(clSetKernelArg(mul_f32_cl, 6, sizeof(cl_int), &ky));
+                    CL_CHECK(clEnqueueNDRangeKernel(queue, mul_f32_cl, 1, NULL, &global, NULL, 1, &ev, NULL));
+                }
+            }
+
+            CL_CHECK(clReleaseEvent(ev));
+            CL_CHECK(clFinish(queue));
+
+            // copy dst to host
+            float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
+            CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * ne00*ne01, d, 0, NULL, NULL));
+        }
+    }
+    ggml_cl_pool_free(d_X, x_size);
+    ggml_cl_pool_free(d_D, d_size);
+}
+
+void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
+    GGML_ASSERT(src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
+    ggml_cl_mul_f32(src0, src1, dst);
+}
+
 static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     const int64_t ne00 = src0->ne[0];
     const int64_t ne01 = src0->ne[1];
@@ -860,13 +983,15 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
     cl_kernel* dmmv = ggml_get_dequantize_mul_mat_vec_cl(type);
     GGML_ASSERT(to_fp32_cl != nullptr);

+    size_t ev_idx = 0;
+    std::vector<cl_event> events;
+
     for (int64_t i03 = 0; i03 < ne03; i03++) {
         for (int64_t i02 = 0; i02 < ne02; i02++) {
-            cl_event ev_sgemm;
-
             // copy src0 to device if necessary
             if (src0->backend == GGML_BACKEND_CPU) {
-                CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Q, 0, src0, i03, i02, NULL));
+                events.emplace_back();
+                CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Q, 0, src0, i03, i02, events.data() + ev_idx++));
             } else if (src0->backend == GGML_BACKEND_CL) {
                 d_Q = (cl_mem) src0->data;
             } else {
@@ -874,30 +999,32 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
             }
             if (mul_mat_vec) { // specialized dequantize_mul_mat_vec kernel
                 // copy src1 to device
-                CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i03, i02, NULL));
+                events.emplace_back();
+                CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i03, i02, events.data() + ev_idx++));

                 // compute
                 const size_t global = ne01 * CL_DMMV_BLOCK_SIZE;
                 const size_t local = CL_DMMV_BLOCK_SIZE;
                 const cl_int ncols = ne00;
+                events.emplace_back();
                 CL_CHECK(clSetKernelArg(*dmmv, 0, sizeof(cl_mem), &d_Q));
                 CL_CHECK(clSetKernelArg(*dmmv, 1, sizeof(float) * local, NULL));
                 CL_CHECK(clSetKernelArg(*dmmv, 2, sizeof(cl_mem), &d_Y));
                 CL_CHECK(clSetKernelArg(*dmmv, 3, sizeof(cl_mem), &d_D));
                 CL_CHECK(clSetKernelArg(*dmmv, 4, sizeof(cl_int), &ncols));
-                CL_CHECK(clFinish(queue));
-                CL_CHECK(clEnqueueNDRangeKernel(queue, *dmmv, 1, NULL, &global, &local, 0, NULL, &ev_sgemm));
+                CL_CHECK(clEnqueueNDRangeKernel(queue, *dmmv, 1, NULL, &global, &local, events.size() - 1, events.data(), events.data() + ev_idx++));
             } else { // general dequantization kernel + CLBlast matrix matrix multiplication
                 // convert src0 to fp32 on device
                 const size_t global = x_ne;
                 CL_CHECK(clSetKernelArg(*to_fp32_cl, 0, sizeof(cl_mem), &d_Q));
                 CL_CHECK(clSetKernelArg(*to_fp32_cl, 1, sizeof(cl_mem), &d_X));
-                CL_CHECK(clFinish(queue));
-                CL_CHECK(clEnqueueNDRangeKernel(queue, *to_fp32_cl, 1, NULL, &global, NULL, 0, NULL, NULL));
+                CL_CHECK(clEnqueueNDRangeKernel(queue, *to_fp32_cl, 1, NULL, &global, NULL, events.size(), !events.empty() ? events.data() : NULL, NULL));

                 // copy src1 to device
                 CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i03, i02, NULL));

+                events.emplace_back();
+
                 // wait for conversion
                 CL_CHECK(clFinish(queue));

@@ -910,7 +1037,7 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
                                            d_Y, 0, ne10,
                                            beta,
                                            d_D, 0, ne01,
-                                           &queue, &ev_sgemm);
+                                           &queue, events.data() + ev_idx++);

                 if (status != clblast::StatusCode::kSuccess) {
                     GGML_ASSERT(false);
@@ -919,8 +1046,13 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *

             // copy dst to host
             float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
-            CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &ev_sgemm, NULL));
-            clReleaseEvent(ev_sgemm);
+            CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &events[events.size() - 1], NULL));
+            for (auto *event : events) {
+                clReleaseEvent(event);
+            }
+
+            ev_idx = 0;
+            events.clear();
         }
     }

@@ -1026,3 +1158,33 @@ void ggml_cl_transform_tensor(ggml_tensor * tensor) {
     tensor->data = dst;
     tensor->backend = GGML_BACKEND_CL;
 }
+
+void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, const size_t offset) {
+    cl_int err;
+    FILE * fp = fopen(fname, "rb");
+
+    const size_t size = ggml_nbytes(tensor);
+
+    cl_mem dst;
+    CL_CHECK((dst = clCreateBuffer(context, CL_MEM_READ_ONLY, size, nullptr, &err), err));
+    void * buf_host = malloc(size);
+
+#ifdef _WIN32
+    int ret = _fseeki64(fp, (__int64) offset, SEEK_SET);
+#else
+    int ret = fseek(fp, (long) offset, SEEK_SET);
+#endif
+    GGML_ASSERT(ret == 0); // same
+
+    size_t ret2 = fread(buf_host, size, 1, fp);
+    if (ret2 != 1) {
+        fprintf(stderr, "unexpectedly reached end of file");
+        exit(1);
+    }
+
+    clEnqueueWriteBuffer(queue, dst, CL_TRUE, 0, size, buf_host, 0, nullptr, nullptr);
+
+    tensor->data = dst;
+    free(buf_host);
+    fclose(fp);
+}
diff --git a/ggml-opencl.h b/ggml-opencl.h
index 5a1a50093..c850bb8ad 100644
--- a/ggml-opencl.h
+++ b/ggml-opencl.h
@@ -8,6 +8,7 @@ extern "C" {

 void ggml_cl_init(void);

+void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
 bool   ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
 size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
 void   ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);
@@ -16,6 +17,7 @@ void * ggml_cl_host_malloc(size_t size);
 void   ggml_cl_host_free(void * ptr);

 void ggml_cl_transform_tensor(struct ggml_tensor * tensor);
+void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, size_t offset);

 #ifdef  __cplusplus
 }
diff --git a/ggml.c b/ggml.c
index 4cd0d7211..91552c94c 100644
--- a/ggml.c
+++ b/ggml.c
@@ -8134,6 +8134,13 @@ static void ggml_compute_forward_mul_f32(
         }
         return;
     }
+#elif defined(GGML_USE_CLBLAST)
+    if (src1->backend == GGML_BACKEND_CL) {
+        if (ith == 0) {
+            ggml_cl_mul(src0, src1, dst);
+        }
+        return;
+    }
 #endif

     const int64_t nr = ggml_nrows(src0);
diff --git a/llama.cpp b/llama.cpp
index 47b4c8dd7..f70b26c0f 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1010,8 +1010,12 @@ static void llama_model_load_internal(
         }
     }

-#ifdef GGML_USE_CUBLAS
+#if defined(GGML_USE_CUBLAS)
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CUDA
+    fprintf(stderr, "%s: using CUDA for GPU acceleration\n", __func__);
+#elif defined(GGML_USE_CLBLAST)
+#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CL
+    fprintf(stderr, "%s: using OpenCL for GPU acceleration\n", __func__);
 #else
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CPU
 #endif
@@ -1063,7 +1067,7 @@ static void llama_model_load_internal(
             layer.w2 = ml->get_tensor(layers_i + ".feed_forward.w2.weight", { n_ff, n_embd}, backend);
             layer.w3 = ml->get_tensor(layers_i + ".feed_forward.w3.weight", {n_embd, n_ff}, backend);

-            if (backend == GGML_BACKEND_CUDA) {
+            if (backend == LLAMA_BACKEND_OFFLOAD) {
                 vram_total +=
                     ggml_nbytes(layer.attention_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
                     ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.attention_norm) +
@@ -1093,15 +1097,15 @@ static void llama_model_load_internal(
     fprintf(stderr, "%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
             mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);

-#ifdef GGML_USE_CUBLAS
     const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));

-    fprintf(stderr, "%s: [cublas] offloading %d layers to GPU\n", __func__, n_gpu);
+#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
+    fprintf(stderr, "%s: offloading %d layers to GPU\n", __func__, n_gpu);
     if (n_gpu_layers > (int) hparams.n_layer) {
-        fprintf(stderr, "%s: [cublas] offloading output layer to GPU\n", __func__);
+        fprintf(stderr, "%s: offloading output layer to GPU\n", __func__);
     }
-    fprintf(stderr, "%s: [cublas] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
-#elif !defined(GGML_USE_CLBLAST)
+    fprintf(stderr, "%s: total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
+#else
     (void) n_gpu_layers;
 #endif
 }
@@ -1113,7 +1117,7 @@ static void llama_model_load_internal(

     ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? &lctx.model.mlock_mmap : NULL);

-#ifdef GGML_USE_CUBLAS
+#if defined(GGML_USE_CUBLAS)
     {
         size_t done_size = 0;
         size_t data_size = 0;
@@ -1136,29 +1140,24 @@ static void llama_model_load_internal(
         }
     }
 #elif defined(GGML_USE_CLBLAST)
     {
-        const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
-
-        fprintf(stderr, "ggml_opencl: offloading %d layers to GPU\n", n_gpu);
-
-        size_t vram_total = 0;
-
-        for (int i = 0; i < n_gpu; ++i) {
-            const auto & layer = model.layers[i];
-
-            ggml_cl_transform_tensor(layer.wq); vram_total += ggml_nbytes(layer.wq);
-            ggml_cl_transform_tensor(layer.wk); vram_total += ggml_nbytes(layer.wk);
-            ggml_cl_transform_tensor(layer.wv); vram_total += ggml_nbytes(layer.wv);
-            ggml_cl_transform_tensor(layer.wo); vram_total += ggml_nbytes(layer.wo);
-            ggml_cl_transform_tensor(layer.w1); vram_total += ggml_nbytes(layer.w1);
-            ggml_cl_transform_tensor(layer.w2); vram_total += ggml_nbytes(layer.w2);
-            ggml_cl_transform_tensor(layer.w3); vram_total += ggml_nbytes(layer.w3);
+        size_t done_size = 0;
+        size_t data_size = 0;
+        for (llama_load_tensor & lt : ml->tensors_map.tensors) {
+            data_size += lt.size;
+            if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) {
+                done_size += lt.size;
+            }
         }
-        if (n_gpu_layers > (int) hparams.n_layer) {
-            fprintf(stderr, "ggml_opencl: offloading output layer to GPU\n");
-            ggml_cl_transform_tensor(model.output); vram_total += ggml_nbytes(model.output);
+        for (llama_load_tensor & lt : ml->tensors_map.tensors) {
+            if (lt.ggml_tensor->backend != GGML_BACKEND_CL) {
+                continue;
+            }
+            if (progress_callback) {
+                progress_callback((float) done_size / data_size, progress_callback_user_data);
+            }
+            ggml_cl_load_data(fname.c_str(), lt.ggml_tensor, lt.shards.at(0).file_off);
+            done_size += lt.size;
         }
-
-        fprintf(stderr, "ggml_opencl: total VRAM used: %zu MB\n", vram_total / 1024 / 1024);
     }
 #endif
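
Note (not part of the applied diff): the recurring pattern in ggml_cl_mul_mat_q_f32 above is to hand each clEnqueue* call a wait list of cl_event handles instead of separating the steps with clFinish(). A minimal sketch of that wait-list idea in the plain OpenCL C API follows; the function and variable names are illustrative only (they do not exist in the llama.cpp tree), error checking is omitted, and an already-created queue, kernel and buffers are assumed.

#include <CL/cl.h>
#include <stddef.h>

// Sketch: chain upload -> kernel -> readback through events instead of clFinish().
static void run_chained(cl_command_queue queue, cl_kernel kernel,
                        cl_mem d_src, cl_mem d_dst,
                        const float * host_src, float * host_dst, size_t n) {
    cl_event ev_upload, ev_kernel;

    // Non-blocking host-to-device copy; completion is signalled through ev_upload.
    clEnqueueWriteBuffer(queue, d_src, CL_FALSE, 0, n * sizeof(float), host_src, 0, NULL, &ev_upload);

    clSetKernelArg(kernel, 0, sizeof(cl_mem), &d_src);
    clSetKernelArg(kernel, 1, sizeof(cl_mem), &d_dst);

    // The kernel waits on the upload event rather than on a queue-wide clFinish().
    size_t global = n;
    clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, NULL, 1, &ev_upload, &ev_kernel);

    // Blocking read that waits only on the kernel event.
    clEnqueueReadBuffer(queue, d_dst, CL_TRUE, 0, n * sizeof(float), host_dst, 1, &ev_kernel, NULL);

    clReleaseEvent(ev_upload);
    clReleaseEvent(ev_kernel);
}

Passing explicit wait lists this way is what allows the patch to drop the per-iteration clFinish() calls before the dequantization and dequantize_mul_mat_vec kernel launches, which is the queueing-overhead reduction the commit message refers to.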