diff --git a/CHANGELOG b/CHANGELOG
index d018e211..7973655b 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -6,6 +6,7 @@ Development version (next release)
 - Fixed a bug with a size_t and cl_ulong mismatch on 32-bit systems
 - Fixed a bug related to the cache and retrieval of programs based on the OpenCL context
 - Fixed a performance issue (caused by fp16 support) by optimizing alpha/beta parameter passing to kernels
+- Fixed a bug in the OpenCL kernels: now placing __kernel before __attribute__
 - Added an option (-warm_up) to do a warm-up run before timing in the performance clients
 - Improved performance significantly of rotated GEMV computations
 - Added tuned parameters for various devices (see README)
diff --git a/README.md b/README.md
index 1b3adcb9..04a8e470 100644
--- a/README.md
+++ b/README.md
@@ -286,6 +286,7 @@ The contributing authors (code, pull requests, testing) so far are:
 * [Marco Hutter](https://github.com/gpus)
 * [Hugh Perkins](https://github.com/hughperkins)
 * [Gian-Carlo Pascutto](https://github.com/gcp)
+* [Dimitri VA](https://github.com/dvasschemacq)
 
 Tuning and testing on a variety of OpenCL devices was made possible by:
 
diff --git a/src/kernels/level1/xamax.opencl b/src/kernels/level1/xamax.opencl
index 48d0eb5c..48ad2e75 100644
--- a/src/kernels/level1/xamax.opencl
+++ b/src/kernels/level1/xamax.opencl
@@ -30,10 +30,10 @@ R"(
 // =================================================================================================
 
 // The main reduction kernel, performing the loading and the majority of the operation
-__attribute__((reqd_work_group_size(WGS1, 1, 1)))
-__kernel void Xamax(const int n,
-                    const __global real* restrict xgm, const int x_offset, const int x_inc,
-                    __global singlereal* maxgm, __global unsigned int* imaxgm) {
+__kernel __attribute__((reqd_work_group_size(WGS1, 1, 1)))
+void Xamax(const int n,
+           const __global real* restrict xgm, const int x_offset, const int x_inc,
+           __global singlereal* maxgm, __global unsigned int* imaxgm) {
   __local singlereal maxlm[WGS1];
   __local unsigned int imaxlm[WGS1];
   const int lid = get_local_id(0);
@@ -95,10 +95,10 @@ __kernel void Xamax(const int n,
 
 // The epilogue reduction kernel, performing the final bit of the operation. This kernel has to
 // be launched with a single workgroup only.
-__attribute__((reqd_work_group_size(WGS2, 1, 1)))
-__kernel void XamaxEpilogue(const __global singlereal* restrict maxgm,
-                            const __global unsigned int* restrict imaxgm,
-                            __global unsigned int* imax, const int imax_offset) {
+__kernel __attribute__((reqd_work_group_size(WGS2, 1, 1)))
+void XamaxEpilogue(const __global singlereal* restrict maxgm,
+                   const __global unsigned int* restrict imaxgm,
+                   __global unsigned int* imax, const int imax_offset) {
   __local singlereal maxlm[WGS2];
   __local unsigned int imaxlm[WGS2];
   const int lid = get_local_id(0);
diff --git a/src/kernels/level1/xasum.opencl b/src/kernels/level1/xasum.opencl
index 58d0f11b..1fc91be8 100644
--- a/src/kernels/level1/xasum.opencl
+++ b/src/kernels/level1/xasum.opencl
@@ -30,10 +30,10 @@ R"(
 // =================================================================================================
 
 // The main reduction kernel, performing the loading and the majority of the operation
-__attribute__((reqd_work_group_size(WGS1, 1, 1)))
-__kernel void Xasum(const int n,
-                    const __global real* restrict xgm, const int x_offset, const int x_inc,
-                    __global real* output) {
+__kernel __attribute__((reqd_work_group_size(WGS1, 1, 1)))
+void Xasum(const int n,
+           const __global real* restrict xgm, const int x_offset, const int x_inc,
+           __global real* output) {
   __local real lm[WGS1];
   const int lid = get_local_id(0);
   const int wgid = get_group_id(0);
@@ -74,9 +74,9 @@ __kernel void Xasum(const int n,
 
 // The epilogue reduction kernel, performing the final bit of the operation. This kernel has to
 // be launched with a single workgroup only.
-__attribute__((reqd_work_group_size(WGS2, 1, 1)))
-__kernel void XasumEpilogue(const __global real* restrict input,
-                            __global real* asum, const int asum_offset) {
+__kernel __attribute__((reqd_work_group_size(WGS2, 1, 1)))
+void XasumEpilogue(const __global real* restrict input,
+                   __global real* asum, const int asum_offset) {
   __local real lm[WGS2];
   const int lid = get_local_id(0);
diff --git a/src/kernels/level1/xaxpy.opencl b/src/kernels/level1/xaxpy.opencl
index d533041b..ece8476e 100644
--- a/src/kernels/level1/xaxpy.opencl
+++ b/src/kernels/level1/xaxpy.opencl
@@ -22,10 +22,10 @@ R"(
 // =================================================================================================
 
 // Full version of the kernel with offsets and strided accesses
-__attribute__((reqd_work_group_size(WGS, 1, 1)))
-__kernel void Xaxpy(const int n, const real_arg arg_alpha,
-                    const __global real* restrict xgm, const int x_offset, const int x_inc,
-                    __global real* ygm, const int y_offset, const int y_inc) {
+__kernel __attribute__((reqd_work_group_size(WGS, 1, 1)))
+void Xaxpy(const int n, const real_arg arg_alpha,
+           const __global real* restrict xgm, const int x_offset, const int x_inc,
+           __global real* ygm, const int y_offset, const int y_inc) {
   const real alpha = GetRealArg(arg_alpha);
 
   // Loops over the work that needs to be done (allows for an arbitrary number of threads)
@@ -40,10 +40,10 @@ __kernel void Xaxpy(const int n, const real_arg arg_alpha,
 
 // Faster version of the kernel without offsets and strided accesses. Also assumes that 'n' is
 // dividable by 'VW', 'WGS' and 'WPT'.
-__attribute__((reqd_work_group_size(WGS, 1, 1)))
-__kernel void XaxpyFast(const int n, const real_arg arg_alpha,
-                        const __global realV* restrict xgm,
-                        __global realV* ygm) {
+__kernel __attribute__((reqd_work_group_size(WGS, 1, 1)))
+void XaxpyFast(const int n, const real_arg arg_alpha,
+               const __global realV* restrict xgm,
+               __global realV* ygm) {
   const real alpha = GetRealArg(arg_alpha);
 
   #pragma unroll
diff --git a/src/kernels/level1/xcopy.opencl b/src/kernels/level1/xcopy.opencl
index 97c27ccf..228e0735 100644
--- a/src/kernels/level1/xcopy.opencl
+++ b/src/kernels/level1/xcopy.opencl
@@ -22,10 +22,10 @@ R"(
 // =================================================================================================
 
 // Full version of the kernel with offsets and strided accesses
-__attribute__((reqd_work_group_size(WGS, 1, 1)))
-__kernel void Xcopy(const int n,
-                    const __global real* restrict xgm, const int x_offset, const int x_inc,
-                    __global real* ygm, const int y_offset, const int y_inc) {
+__kernel __attribute__((reqd_work_group_size(WGS, 1, 1)))
+void Xcopy(const int n,
+           const __global real* restrict xgm, const int x_offset, const int x_inc,
+           __global real* ygm, const int y_offset, const int y_inc) {
 
   // Loops over the work that needs to be done (allows for an arbitrary number of threads)
   #pragma unroll
@@ -38,10 +38,10 @@ __kernel void Xcopy(const int n,
 
 // Faster version of the kernel without offsets and strided accesses. Also assumes that 'n' is
 // dividable by 'VW', 'WGS' and 'WPT'.
-__attribute__((reqd_work_group_size(WGS, 1, 1)))
-__kernel void XcopyFast(const int n,
-                        const __global realV* restrict xgm,
-                        __global realV* ygm) {
+__kernel __attribute__((reqd_work_group_size(WGS, 1, 1)))
+void XcopyFast(const int n,
+               const __global realV* restrict xgm,
+               __global realV* ygm) {
   #pragma unroll
   for (int w=0; w
 'a_ld' is a multiple of VW2
 // --> 'a_rotated' is 0
 // --> 'do_conjugate' is 0
-__attribute__((reqd_work_group_size(WGS2, 1, 1)))
-__kernel void XgemvFast(const int m, const int n,
-                        const real_arg arg_alpha,
-                        const real_arg arg_beta,
-                        const int a_rotated,
-                        const __global realVF* restrict agm, const int a_offset, const int a_ld,
-                        const __global real* restrict xgm, const int x_offset, const int x_inc,
-                        __global real* ygm, const int y_offset, const int y_inc,
-                        const int do_conjugate, const int parameter,
-                        const int kl_unused, const int ku_unused) {
+__kernel __attribute__((reqd_work_group_size(WGS2, 1, 1)))
+void XgemvFast(const int m, const int n,
+               const real_arg arg_alpha,
+               const real_arg arg_beta,
+               const int a_rotated,
+               const __global realVF* restrict agm, const int a_offset, const int a_ld,
+               const __global real* restrict xgm, const int x_offset, const int x_inc,
+               __global real* ygm, const int y_offset, const int y_inc,
+               const int do_conjugate, const int parameter,
+               const int kl_unused, const int ku_unused) {
   const real alpha = GetRealArg(arg_alpha);
   const real beta = GetRealArg(arg_beta);
@@ -190,16 +190,16 @@ __kernel void XgemvFast(const int m, const int n,
 // --> 'a_ld' is a multiple of VW3
 // --> 'a_rotated' is 1
 // --> 'do_conjugate' is 0
-__attribute__((reqd_work_group_size(WGS3, 1, 1)))
-__kernel void XgemvFastRot(const int m, const int n,
-                           const real_arg arg_alpha,
-                           const real_arg arg_beta,
-                           const int a_rotated,
-                           const __global realVFR* restrict agm, const int a_offset, const int a_ld,
-                           const __global real* restrict xgm, const int x_offset, const int x_inc,
-                           __global real* ygm, const int y_offset, const int y_inc,
-                           const int do_conjugate, const int parameter,
-                           const int kl_unused, const int ku_unused) {
+__kernel __attribute__((reqd_work_group_size(WGS3, 1, 1)))
+void XgemvFastRot(const int m, const int n,
+                  const real_arg arg_alpha,
+                  const real_arg arg_beta,
+                  const int a_rotated,
+                  const __global realVFR* restrict agm, const int a_offset, const int a_ld,
+                  const __global real* restrict xgm, const int x_offset, const int x_inc,
+                  __global real* ygm, const int y_offset, const int y_inc,
+                  const int do_conjugate, const int parameter,
+                  const int kl_unused, const int ku_unused) {
   const real alpha = GetRealArg(arg_alpha);
   const real beta = GetRealArg(arg_beta);
diff --git a/src/kernels/level2/xger.opencl b/src/kernels/level2/xger.opencl
index f218a346..1b9ded12 100644
--- a/src/kernels/level2/xger.opencl
+++ b/src/kernels/level2/xger.opencl
@@ -18,13 +18,13 @@ R"(
 // =================================================================================================
 
 // Regular version of the rank-1 matrix update kernel (GER, GERU, GERC)
-__attribute__((reqd_work_group_size(WGS1, WGS2, 1)))
-__kernel void Xger(const int max1, const int max2,
-                   const real_arg arg_alpha,
-                   const __global real* restrict xgm, const int x_offset, const int x_inc,
-                   const __global real* ygm, const int y_offset, const int y_inc,
-                   __global real* restrict agm, const int a_offset, const int a_ld,
-                   const int is_rowmajor) {
+__kernel __attribute__((reqd_work_group_size(WGS1, WGS2, 1)))
+void Xger(const int max1, const int max2,
+          const real_arg arg_alpha,
+          const __global real* restrict xgm, const int x_offset, const int x_inc,
+          const __global real* ygm, const int y_offset, const int y_inc,
+          __global real* restrict agm, const int a_offset, const int a_ld,
+          const int is_rowmajor) {
   const real alpha = GetRealArg(arg_alpha);
 
   // Register storage for X and Y
diff --git a/src/kernels/level2/xher.opencl b/src/kernels/level2/xher.opencl
index 1200ee63..b0772218 100644
--- a/src/kernels/level2/xher.opencl
+++ b/src/kernels/level2/xher.opencl
@@ -18,12 +18,12 @@ R"(
 // =================================================================================================
 
 // Symmetric version of the rank-1 matrix update kernel (HER, HPR, SYR, SPR)
-__attribute__((reqd_work_group_size(WGS1, WGS2, 1)))
-__kernel void Xher(const int n,
-                   const real_arg arg_alpha,
-                   const __global real* restrict xgm, const int x_offset, const int x_inc,
-                   __global real* restrict agm, const int a_offset, const int a_ld,
-                   const int is_upper, const int is_rowmajor) {
+__kernel __attribute__((reqd_work_group_size(WGS1, WGS2, 1)))
+void Xher(const int n,
+          const real_arg arg_alpha,
+          const __global real* restrict xgm, const int x_offset, const int x_inc,
+          __global real* restrict agm, const int a_offset, const int a_ld,
+          const int is_upper, const int is_rowmajor) {
   const real alpha = GetRealArg(arg_alpha);
 
   // Register storage for X and XT
diff --git a/src/kernels/level2/xher2.opencl b/src/kernels/level2/xher2.opencl
index d0f41571..00a756c9 100644
--- a/src/kernels/level2/xher2.opencl
+++ b/src/kernels/level2/xher2.opencl
@@ -18,13 +18,13 @@ R"(
 // =================================================================================================
 
 // Symmetric version of the rank-2 matrix update kernel (HER2, HPR2, SYR2, SPR2)
-__attribute__((reqd_work_group_size(WGS1, WGS2, 1)))
-__kernel void Xher2(const int n,
-                    const real_arg arg_alpha,
-                    const __global real* restrict xgm, const int x_offset, const int x_inc,
-                    const __global real* restrict ygm, const int y_offset, const int y_inc,
-                    __global real* restrict agm, const int a_offset, const int a_ld,
-                    const int is_upper, const int is_rowmajor) {
+__kernel __attribute__((reqd_work_group_size(WGS1, WGS2, 1)))
+void Xher2(const int n,
+           const real_arg arg_alpha,
+           const __global real* restrict xgm, const int x_offset, const int x_inc,
+           const __global real* restrict ygm, const int y_offset, const int y_inc,
+           __global real* restrict agm, const int a_offset, const int a_ld,
+           const int is_upper, const int is_rowmajor) {
   const real alpha = GetRealArg(arg_alpha);
 
   // Register storage for X and Y
diff --git a/src/kernels/level3/convert_hermitian.opencl b/src/kernels/level3/convert_hermitian.opencl
index 53cc161a..ed2ded98 100644
--- a/src/kernels/level3/convert_hermitian.opencl
+++ b/src/kernels/level3/convert_hermitian.opencl
@@ -20,13 +20,13 @@ R"(
 
 // Kernel to populate a squared hermitian matrix, given that the triangle which holds the data is
 // stored as the lower-triangle of the input matrix. This uses the padding kernel's parameters.
-__attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
-__kernel void HermLowerToSquared(const int src_dim,
-                                 const int src_ld, const int src_offset,
-                                 __global const real* restrict src,
-                                 const int dest_dim,
-                                 const int dest_ld, const int dest_offset,
-                                 __global real* dest) {
+__kernel __attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
+void HermLowerToSquared(const int src_dim,
+                        const int src_ld, const int src_offset,
+                        __global const real* restrict src,
+                        const int dest_dim,
+                        const int dest_ld, const int dest_offset,
+                        __global real* dest) {
 
   // Loops over the work per thread in both dimensions
   #pragma unroll
@@ -59,13 +59,13 @@ __kernel void HermLowerToSquared(const int src_dim,
 }
 
 // Same as above, but now the matrix' data is stored in the upper-triangle
-__attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
-__kernel void HermUpperToSquared(const int src_dim,
-                                 const int src_ld, const int src_offset,
-                                 __global const real* restrict src,
-                                 const int dest_dim,
-                                 const int dest_ld, const int dest_offset,
-                                 __global real* dest) {
+__kernel __attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
+void HermUpperToSquared(const int src_dim,
+                        const int src_ld, const int src_offset,
+                        __global const real* restrict src,
+                        const int dest_dim,
+                        const int dest_ld, const int dest_offset,
+                        __global real* dest) {
 
   // Loops over the work per thread in both dimensions
   #pragma unroll
diff --git a/src/kernels/level3/convert_symmetric.opencl b/src/kernels/level3/convert_symmetric.opencl
index c6ce93ca..8ae53b37 100644
--- a/src/kernels/level3/convert_symmetric.opencl
+++ b/src/kernels/level3/convert_symmetric.opencl
@@ -20,13 +20,13 @@ R"(
 
 // Kernel to populate a squared symmetric matrix, given that the triangle which holds the data is
 // stored as the lower-triangle of the input matrix. This uses the padding kernel's parameters.
-__attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
-__kernel void SymmLowerToSquared(const int src_dim,
-                                 const int src_ld, const int src_offset,
-                                 __global const real* restrict src,
-                                 const int dest_dim,
-                                 const int dest_ld, const int dest_offset,
-                                 __global real* dest) {
+__kernel __attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
+void SymmLowerToSquared(const int src_dim,
+                        const int src_ld, const int src_offset,
+                        __global const real* restrict src,
+                        const int dest_dim,
+                        const int dest_ld, const int dest_offset,
+                        __global real* dest) {
 
   // Loops over the work per thread in both dimensions
   #pragma unroll
@@ -53,13 +53,13 @@ __kernel void SymmLowerToSquared(const int src_dim,
 }
 
 // Same as above, but now the matrix' data is stored in the upper-triangle
-__attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
-__kernel void SymmUpperToSquared(const int src_dim,
-                                 const int src_ld, const int src_offset,
-                                 __global const real* restrict src,
-                                 const int dest_dim,
-                                 const int dest_ld, const int dest_offset,
-                                 __global real* dest) {
+__kernel __attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
+void SymmUpperToSquared(const int src_dim,
+                        const int src_ld, const int src_offset,
+                        __global const real* restrict src,
+                        const int dest_dim,
+                        const int dest_ld, const int dest_offset,
+                        __global real* dest) {
 
   // Loops over the work per thread in both dimensions
   #pragma unroll
diff --git a/src/kernels/level3/convert_triangular.opencl b/src/kernels/level3/convert_triangular.opencl
index fdd2461a..f848dcc1 100644
--- a/src/kernels/level3/convert_triangular.opencl
+++ b/src/kernels/level3/convert_triangular.opencl
@@ -20,14 +20,14 @@ R"(
 
 // Kernel to populate a squared triangular matrix, given that the triangle which holds the data is
 // stored as the lower-triangle of the input matrix. This uses the padding kernel's parameters.
-__attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
-__kernel void TriaLowerToSquared(const int src_dim,
-                                 const int src_ld, const int src_offset,
-                                 __global const real* restrict src,
-                                 const int dest_dim,
-                                 const int dest_ld, const int dest_offset,
-                                 __global real* dest,
-                                 const int unit_diagonal) {
+__kernel __attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
+void TriaLowerToSquared(const int src_dim,
+                        const int src_ld, const int src_offset,
+                        __global const real* restrict src,
+                        const int dest_dim,
+                        const int dest_ld, const int dest_offset,
+                        __global real* dest,
+                        const int unit_diagonal) {
 
   // Loops over the work per thread in both dimensions
   #pragma unroll
@@ -55,14 +55,14 @@ __kernel void TriaLowerToSquared(const int src_dim,
 }
 
 // Same as above, but now the matrix' data is stored in the upper-triangle
-__attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
-__kernel void TriaUpperToSquared(const int src_dim,
-                                 const int src_ld, const int src_offset,
-                                 __global const real* restrict src,
-                                 const int dest_dim,
-                                 const int dest_ld, const int dest_offset,
-                                 __global real* dest,
-                                 const int unit_diagonal) {
+__kernel __attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
+void TriaUpperToSquared(const int src_dim,
+                        const int src_ld, const int src_offset,
+                        __global const real* restrict src,
+                        const int dest_dim,
+                        const int dest_ld, const int dest_offset,
+                        __global real* dest,
+                        const int unit_diagonal) {
 
   // Loops over the work per thread in both dimensions
   #pragma unroll
diff --git a/src/kernels/level3/copy_fast.opencl b/src/kernels/level3/copy_fast.opencl
index dd975bf1..695b9003 100644
--- a/src/kernels/level3/copy_fast.opencl
+++ b/src/kernels/level3/copy_fast.opencl
@@ -35,11 +35,11 @@ R"(
 
 // Fast copy kernel. Requires 'ld' and the number of threads in dimension 0 to be a multiple of
 // COPY_VW. Also requires both matrices to be of the same dimensions and without offset.
-__attribute__((reqd_work_group_size(COPY_DIMX, COPY_DIMY, 1)))
-__kernel void CopyMatrixFast(const int ld,
-                             __global const realC* restrict src,
-                             __global realC* dest,
-                             const real_arg arg_alpha) {
+__kernel __attribute__((reqd_work_group_size(COPY_DIMX, COPY_DIMY, 1)))
+void CopyMatrixFast(const int ld,
+                    __global const realC* restrict src,
+                    __global realC* dest,
+                    const real_arg arg_alpha) {
   const real alpha = GetRealArg(arg_alpha);
   #pragma unroll
   for (int w_one=0; w_one