Merge pull request #204 from CNugteren/cuda_api

Cuda API to CLBlast
pull/206/head
Cedric Nugteren 2017-10-20 12:07:30 +02:00 committed by GitHub
commit 42dcd8fd8a
92 changed files with 5578 additions and 829 deletions

View File

@ -1,5 +1,9 @@
Development (next version)
- Added a CUDA API to CLBlast:
* The library and kernels can be compiled with the CUDA driver API and NVRTC (requires CUDA 7.5)
* Two CUDA API sample programs are added: SGEMM and DAXPY
* All correctness tests and performance clients work on CUDA like they did for OpenCL
- Kernels are now cached based on their tuning parameters: fits the use-case of 'OverrideParameters'
- Improved performance for small GEMM problems by going from 3 to 1 optional temporary buffers
- Various minor fixes and enhancements

View File

@ -33,6 +33,23 @@ option(TESTS "Enable compilation of the correctness tests" OFF)
option(NETLIB "Enable compilation of the CBLAS Netlib API" OFF)
option(CUBLAS "Enables performance comparison against cuBLAS on NVIDIA GPUs" OFF)
# Select between an OpenCL API (default) or a CUDA API (beta).
# Exactly one of the two must be enabled; the checks below enforce this.
option(OPENCL "Build CLBlast with an OpenCL API (default)" ON)
option(CUDA "Build CLBlast with a CUDA API (beta)" OFF)
if(NOT OPENCL AND NOT CUDA)
  message(FATAL_ERROR "No API selected, choose from OpenCL (-DOPENCL=ON) or CUDA (-DCUDA=ON)")
endif()
if(OPENCL AND CUDA)
  message(FATAL_ERROR "Multiple APIs selected, choose either OpenCL (-DOPENCL=ON -DCUDA=OFF) or CUDA (-DCUDA=ON -DOPENCL=OFF)")
endif()
if(OPENCL)
  # STATUS mode prints to stdout with the standard "-- " prefix instead of
  # hand-rolling it in a NOTICE-level message.
  message(STATUS "Building CLBlast with OpenCL API (default)")
  # NOTE(review): add_definitions() is directory-scoped; the define must reach the
  # library, tuners, clients and tests, which are all defined later in this file.
  # target_compile_definitions() per target would be the modern alternative.
  add_definitions(-DOPENCL_API)
elseif(CUDA)
  message(STATUS "Building CLBlast with CUDA API (beta)")
  add_definitions(-DCUDA_API)
endif()
# Compile in verbose mode with additional diagnostic messages
option(VERBOSE "Compile in verbose mode for additional diagnostic messages" OFF)
if(VERBOSE)
@ -126,8 +143,18 @@ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CFLAGS}")
# Package scripts location
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${clblast_SOURCE_DIR}/cmake/Modules/")
# Requires OpenCL. It is found through the included "FindOpenCL.cmake" in CMAKE_MODULE_PATH.
find_package(OpenCL REQUIRED)
# Resolves the selected API into generic API_LIBRARIES / API_INCLUDE_DIRS
# variables, which the rest of this file uses when linking every target.
if(OPENCL)
# Requires OpenCL. It is found through the included "FindOpenCL.cmake" in CMAKE_MODULE_PATH.
find_package(OpenCL REQUIRED)
set(API_LIBRARIES ${OPENCL_LIBRARIES})
set(API_INCLUDE_DIRS ${OPENCL_INCLUDE_DIRS})
elseif(CUDA)
# For CUDA, the "FindCUDA.cmake" is part of CMake
find_package(CUDA REQUIRED)
# Links the CUDA driver API and NVRTC by bare library name (no imported targets)
set(API_LIBRARIES cuda nvrtc)
set(API_INCLUDE_DIRS ${CUDA_INCLUDE_DIRS})
# NOTE(review): hardcodes the 64-bit Linux-style toolkit layout ("lib64");
# verify this path on Windows, macOS and 32-bit CUDA installations.
link_directories(${CUDA_TOOLKIT_ROOT_DIR}/lib64)
endif()
# Locates the CLTune library in case the tuners need to be compiled. "FindCLTune.cmake" is included.
if(TUNERS)
@ -141,8 +168,10 @@ endif()
# Locates the reference BLAS libraries in case the tests need to be compiled. The "FindclBLAS.cmake",
# "FindCBLAS.cmake" and "FindcuBLAS.cmake" are included.
if(CLIENTS OR TESTS)
find_package(clBLAS)
find_package(CBLAS)
if(OPENCL)
find_package(clBLAS)
endif()
if(CUBLAS)
find_package(cuBLAS)
endif()
@ -164,11 +193,6 @@ set(KERNELS copy_fast copy_pad transpose_fast transpose_pad xaxpy xdot xger
xgemm xgemm_direct xgemv)
set(DATABASES copy pad padtranspose transpose xaxpy xdot
xgemm xgemm_direct xgemv xgemv_fast xgemv_fast_rot xger)
set(SAMPLE_PROGRAMS_CPP sgemm sgemm_batched)
set(SAMPLE_PROGRAMS_C sasum dgemv sgemm haxpy cache)
if(NETLIB)
set(SAMPLE_PROGRAMS_C ${SAMPLE_PROGRAMS_C} sgemm_netlib)
endif()
set(LEVEL1_ROUTINES xswap xscal xcopy xaxpy xdot xdotu xdotc xnrm2 xasum xamax)
set(LEVEL2_ROUTINES xgemv xgbmv xhemv xhbmv xhpmv xsymv xsbmv xspmv xtrmv xtbmv xtpmv xtrsv
xger xgeru xgerc xher xhpr xher2 xhpr2 xsyr xspr xsyr2 xspr2)
@ -177,6 +201,18 @@ set(LEVELX_ROUTINES xomatcopy xim2col xaxpybatched xgemmbatched)
set(ROUTINES ${LEVEL1_ROUTINES} ${LEVEL2_ROUTINES} ${LEVEL3_ROUTINES} ${LEVELX_ROUTINES})
set(PRECISIONS 32 64 3232 6464 16)
# Selects which stand-alone sample programs get built for the chosen API.
# The CUDA build has C++ samples only; the OpenCL build additionally has C
# samples and, optionally, the Netlib sample.
if(OPENCL)
  set(SAMPLE_PROGRAMS_CPP sgemm sgemm_batched)
  set(SAMPLE_PROGRAMS_C sasum dgemv sgemm haxpy cache)
  if(NETLIB)
    list(APPEND SAMPLE_PROGRAMS_C sgemm_netlib)
  endif()
elseif(CUDA)
  set(SAMPLE_PROGRAMS_CPP daxpy_cuda sgemm_cuda)
  set(SAMPLE_PROGRAMS_C )
endif()
# ==================================================================================================
# Gathers all source-files (required for the compiler) and header-files (for IDEs only)
@ -185,15 +221,12 @@ set(SOURCES
src/routines/common.cpp
src/utilities/clblast_exceptions.cpp
src/utilities/utilities.cpp
src/api_common.cpp
src/cache.cpp
src/clblast.cpp
src/clblast_c.cpp
src/routine.cpp
src/routines/levelx/xinvert.cpp # only source, don't include it as a test
)
set(HEADERS # such that they can be discovered by IDEs such as CLion and Visual Studio
include/clblast.h
include/clblast_c.h
include/clblast_half.h
src/database/apple_cpu_fallback.hpp
src/database/database.hpp
@ -204,19 +237,26 @@ set(HEADERS # such that they can be discovered by IDEs such as CLion and Visual
src/routines/level1/xmin.hpp
src/routines/level1/xsum.hpp
src/routines/common.hpp
src/routines/routines.hpp
src/utilities/buffer_test.hpp
src/utilities/clblast_exceptions.hpp
src/utilities/device_mapping.hpp
src/utilities/msvc.hpp
src/utilities/utilities.hpp
src/cache.hpp
src/clpp11.hpp
src/cxpp11_common.hpp
src/routine.hpp
)
if(NETLIB)
set(SOURCES ${SOURCES} src/clblast_netlib_c.cpp)
set(HEADERS ${HEADERS} include/clblast_netlib_c.h)
# API-specific library sources and headers: the OpenCL build ships the regular
# clblast.h / clblast_c.h APIs (plus the optional Netlib CBLAS layer), while
# the CUDA build replaces them with clblast_cuda.h and the CUDA C++11 wrapper.
if(OPENCL)
set(SOURCES ${SOURCES} src/clblast.cpp src/clblast_c.cpp)
set(HEADERS ${HEADERS} include/clblast.h include/clblast_c.h src/clpp11.hpp)
if(NETLIB)
set(SOURCES ${SOURCES} src/clblast_netlib_c.cpp)
set(HEADERS ${HEADERS} include/clblast_netlib_c.h)
endif()
elseif(CUDA)
set(SOURCES ${SOURCES} src/clblast_cuda.cpp)
set(HEADERS ${HEADERS} include/clblast_cuda.h src/cupp11.hpp)
endif()
foreach(ROUTINE ${LEVEL1_ROUTINES})
set(SOURCES ${SOURCES} src/routines/level1/${ROUTINE}.cpp)
@ -250,14 +290,14 @@ else(BUILD_SHARED_LIBS)
add_library(clblast STATIC ${SOURCES} ${HEADERS})
endif()
target_link_libraries(clblast ${OPENCL_LIBRARIES})
target_link_libraries(clblast ${API_LIBRARIES})
# Includes directories: CLBlast and OpenCL
target_include_directories(clblast PUBLIC
$<BUILD_INTERFACE:${clblast_SOURCE_DIR}/include>
$<BUILD_INTERFACE:${clblast_SOURCE_DIR}/src>
$<INSTALL_INTERFACE:include>
${OPENCL_INCLUDE_DIRS})
${API_INCLUDE_DIRS})
# Sets the proper __declspec(dllexport) keyword for Visual Studio when the library is built
if(MSVC)
@ -268,11 +308,15 @@ endif()
# Installs the library
install(TARGETS clblast EXPORT CLBlast DESTINATION lib)
install(FILES include/clblast.h DESTINATION include)
install(FILES include/clblast_c.h DESTINATION include)
install(FILES include/clblast_half.h DESTINATION include)
if(NETLIB)
install(FILES include/clblast_netlib_c.h DESTINATION include)
# Installs only the public headers belonging to the compiled API
if(OPENCL)
install(FILES include/clblast.h DESTINATION include)
install(FILES include/clblast_c.h DESTINATION include)
if(NETLIB)
install(FILES include/clblast_netlib_c.h DESTINATION include)
endif()
elseif(CUDA)
install(FILES include/clblast_cuda.h DESTINATION include)
endif()
# Installs the config for find_package in dependent projects
@ -292,19 +336,21 @@ endif()
if(SAMPLES)
# Downloads the cl.hpp file from Khronos
file(DOWNLOAD https://www.khronos.org/registry/OpenCL/api/2.1/cl.hpp ${clblast_SOURCE_DIR}/samples/cl.hpp)
if(OPENCL)
file(DOWNLOAD https://www.khronos.org/registry/OpenCL/api/2.1/cl.hpp ${clblast_SOURCE_DIR}/samples/cl.hpp)
endif()
# Adds sample programs (C++)
foreach(SAMPLE ${SAMPLE_PROGRAMS_CPP})
add_executable(clblast_sample_${SAMPLE} samples/${SAMPLE}.cpp)
target_link_libraries(clblast_sample_${SAMPLE} clblast ${OPENCL_LIBRARIES})
target_link_libraries(clblast_sample_${SAMPLE} clblast ${API_LIBRARIES})
install(TARGETS clblast_sample_${SAMPLE} DESTINATION bin)
endforeach()
# Adds sample programs (C)
foreach(SAMPLE ${SAMPLE_PROGRAMS_C})
add_executable(clblast_sample_${SAMPLE}_c samples/${SAMPLE}.c)
target_link_libraries(clblast_sample_${SAMPLE}_c clblast ${OPENCL_LIBRARIES})
target_link_libraries(clblast_sample_${SAMPLE}_c clblast ${API_LIBRARIES})
install(TARGETS clblast_sample_${SAMPLE}_c DESTINATION bin)
endforeach()
@ -325,7 +371,7 @@ if(TUNERS)
# Adds tuning executables
foreach(KERNEL ${KERNELS})
add_executable(clblast_tuner_${KERNEL} ${TUNERS_COMMON} src/tuning/kernels/${KERNEL}.cpp)
target_link_libraries(clblast_tuner_${KERNEL} clblast ${CLTUNE_LIBRARIES} ${OPENCL_LIBRARIES})
target_link_libraries(clblast_tuner_${KERNEL} clblast ${CLTUNE_LIBRARIES} ${API_LIBRARIES})
target_include_directories(clblast_tuner_${KERNEL} PUBLIC ${CLTUNE_INCLUDE_DIRS})
install(TARGETS clblast_tuner_${KERNEL} DESTINATION bin)
endforeach()
@ -430,7 +476,7 @@ if(CLIENTS)
test/routines/levelx/${ROUTINE}.hpp)
endforeach()
foreach(ROUTINE ${ROUTINES})
target_link_libraries(clblast_client_${ROUTINE} clblast ${REF_LIBRARIES} ${OPENCL_LIBRARIES})
target_link_libraries(clblast_client_${ROUTINE} clblast ${REF_LIBRARIES} ${API_LIBRARIES})
target_include_directories(clblast_client_${ROUTINE} PUBLIC ${clblast_SOURCE_DIR} ${REF_INCLUDES})
install(TARGETS clblast_client_${ROUTINE} DESTINATION bin)
endforeach()
@ -482,7 +528,7 @@ if(TESTS)
test/routines/levelx/${ROUTINE}.hpp)
endforeach()
foreach(ROUTINE ${ROUTINES})
target_link_libraries(clblast_test_${ROUTINE} clblast ${REF_LIBRARIES} ${OPENCL_LIBRARIES})
target_link_libraries(clblast_test_${ROUTINE} clblast ${REF_LIBRARIES} ${API_LIBRARIES})
install(TARGETS clblast_test_${ROUTINE} DESTINATION bin)
target_include_directories(clblast_test_${ROUTINE} PUBLIC ${clblast_SOURCE_DIR} ${REF_INCLUDES})
add_test(clblast_test_${ROUTINE} clblast_test_${ROUTINE})
@ -493,7 +539,7 @@ if(TESTS)
foreach(MISC_TEST ${MISC_TESTS})
add_executable(clblast_test_${MISC_TEST} ${TESTS_COMMON}
test/correctness/misc/${MISC_TEST}.cpp)
target_link_libraries(clblast_test_${MISC_TEST} clblast ${REF_LIBRARIES} ${OPENCL_LIBRARIES})
target_link_libraries(clblast_test_${MISC_TEST} clblast ${REF_LIBRARIES} ${API_LIBRARIES})
target_include_directories(clblast_test_${MISC_TEST} PUBLIC
$<TARGET_PROPERTY:clblast,INTERFACE_INCLUDE_DIRECTORIES>
${clblast_SOURCE_DIR} ${REF_INCLUDES})
@ -502,7 +548,7 @@ if(TESTS)
# CLBlast diagnostics
add_executable(clblast_test_diagnostics ${TESTS_COMMON} test/diagnostics.cpp)
target_link_libraries(clblast_test_diagnostics clblast ${REF_LIBRARIES} ${OPENCL_LIBRARIES})
target_link_libraries(clblast_test_diagnostics clblast ${REF_LIBRARIES} ${API_LIBRARIES})
target_include_directories(clblast_test_diagnostics PUBLIC
$<TARGET_PROPERTY:clblast,INTERFACE_INCLUDE_DIRECTORIES>
${clblast_SOURCE_DIR} ${REF_INCLUDES})

View File

@ -99,11 +99,23 @@ To get started quickly, a couple of stand-alone example programs are included in
cmake -DSAMPLES=ON ..
For all of CLBlast's APIs, it is possible to optionally set an OS environmental variable `CLBLAST_BUILD_OPTIONS` to pass specific build options to the OpenCL compiler.
Using the library (Netlib API)
-------------
There is also a Netlib CBLAS C API available. This is however not recommended for full control over performance, since at every call it will copy all buffers to and from the OpenCL device. Especially for level 1 and level 2 BLAS functions performance will be impacted severely. However, it can be useful if you don't want to touch OpenCL at all. You can set the default device and platform by setting the `CLBLAST_DEVICE` and `CLBLAST_PLATFORM` environmental variables. This API can be used as follows after providing the `-DNETLIB=ON` flag to CMake:
#include <clblast_netlib_c.h>
For all of CLBlast's APIs, it is possible to optionally set an OS environmental variable `CLBLAST_BUILD_OPTIONS` to pass specific build options to the OpenCL compiler.
Using the library (CUDA API)
-------------
There is also a CUDA API of CLBlast available. Enabling this compiles the whole library for CUDA and thus replaces the OpenCL API. It is based upon the CUDA driver and NVRTC APIs, requiring NVIDIA CUDA 7.5 or higher. The CUDA version of the library can be used as follows after providing the `-DCUDA=ON -DOPENCL=OFF` flags to CMake:
#include <clblast_cuda.h>
Using the tuners (optional)

View File

@ -0,0 +1,643 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file contains the special CUDA interface to the CLBlast BLAS routines. It also contains the
// definitions of the returned status codes and the layout and transpose types. This is the header
// users of the CUDA API of CLBlast should include and use.
//
// =================================================================================================
#ifndef CLBLAST_CLBLAST_CUDA_H_
#define CLBLAST_CLBLAST_CUDA_H_
#include <cstdlib> // For size_t
#include <string> // For OverrideParameters function
#include <unordered_map> // For OverrideParameters function
// CUDA
#include <cuda.h> // CUDA driver API
#include <nvrtc.h> // NVIDIA runtime compilation API
// Exports library functions under Windows when building a DLL. See also:
// https://msdn.microsoft.com/en-us/library/a90k134d.aspx
#if defined(_WIN32) && defined(CLBLAST_DLL)
#if defined(COMPILING_DLL)
#define PUBLIC_API __declspec(dllexport)
#else
#define PUBLIC_API __declspec(dllimport)
#endif
#else
#define PUBLIC_API
#endif
namespace clblast {
// =================================================================================================
// Status codes. These codes can be returned by functions declared in this header file. The error
// codes match either the standard CUDA driver API error codes or the regular CLBlast error codes.
// The numeric values are kept identical to those of the OpenCL API of CLBlast (which mirror the
// CL_* and clBLAS error codes), so status handling can be shared between the two APIs.
enum class StatusCode {
// Status codes in common with the OpenCL standard (numeric values match the CL_* codes)
kSuccess = 0, // CUDA_SUCCESS
kInvalidLocalNumDimensions = -53, // CL_INVALID_WORK_DIMENSION: Too many thread dimensions
kInvalidLocalThreadsTotal = -54, // CL_INVALID_WORK_GROUP_SIZE: Too many threads in total
kInvalidLocalThreadsDim = -55, // CL_INVALID_WORK_ITEM_SIZE: ... or for a specific dimension
// Status codes in common with the clBLAS library
kNotImplemented = -1024, // Routine or functionality not implemented yet
kInvalidMatrixA = -1022, // Matrix A is not a valid CUDA buffer
kInvalidMatrixB = -1021, // Matrix B is not a valid CUDA buffer
kInvalidMatrixC = -1020, // Matrix C is not a valid CUDA buffer
kInvalidVectorX = -1019, // Vector X is not a valid CUDA buffer
kInvalidVectorY = -1018, // Vector Y is not a valid CUDA buffer
kInvalidDimension = -1017, // Dimensions M, N, and K have to be larger than zero
kInvalidLeadDimA = -1016, // LD of A is smaller than the matrix's first dimension
kInvalidLeadDimB = -1015, // LD of B is smaller than the matrix's first dimension
kInvalidLeadDimC = -1014, // LD of C is smaller than the matrix's first dimension
kInvalidIncrementX = -1013, // Increment of vector X cannot be zero
kInvalidIncrementY = -1012, // Increment of vector Y cannot be zero
kInsufficientMemoryA = -1011, // Matrix A's CUDA buffer is too small
kInsufficientMemoryB = -1010, // Matrix B's CUDA buffer is too small
kInsufficientMemoryC = -1009, // Matrix C's CUDA buffer is too small
kInsufficientMemoryX = -1008, // Vector X's CUDA buffer is too small
kInsufficientMemoryY = -1007, // Vector Y's CUDA buffer is too small
// Custom additional status codes for CLBlast
kInvalidBatchCount = -2049, // The batch count needs to be positive
kInvalidOverrideKernel = -2048, // Trying to override parameters for an invalid kernel
kMissingOverrideParameter = -2047, // Missing override parameter(s) for the target kernel
kInvalidLocalMemUsage = -2046, // Not enough local memory available on this device
kNoHalfPrecision = -2045, // Half precision (16-bits) not supported by the device
kNoDoublePrecision = -2044, // Double precision (64-bits) not supported by the device
kInvalidVectorScalar = -2043, // The unit-sized vector is not a valid CUDA buffer
kInsufficientMemoryScalar = -2042, // The unit-sized vector's CUDA buffer is too small
kDatabaseError = -2041, // Entry for the device was not found in the database
kUnknownError = -2040, // A catch-all error code representing an unspecified error
kUnexpectedError = -2039, // A catch-all error code representing an unexpected exception
};
// Matrix layout and transpose types
// NOTE(review): the numeric values appear to follow the CBLAS/Netlib numbering
// (e.g. 101 = row-major, 111 = no-transpose) — confirm against the OpenCL API header.
enum class Layout { kRowMajor = 101, kColMajor = 102 };
enum class Transpose { kNo = 111, kYes = 112, kConjugate = 113 };
enum class Triangle { kUpper = 121, kLower = 122 };
enum class Diagonal { kNonUnit = 131, kUnit = 132 };
enum class Side { kLeft = 141, kRight = 142 };
// Precision scoped enum (values in bits; complex types encode the bits of both components)
enum class Precision { kHalf = 16, kSingle = 32, kDouble = 64,
kComplexSingle = 3232, kComplexDouble = 6464, kAny = -1 };
// =================================================================================================
// BLAS level-1 (vector-vector) routines
// =================================================================================================
// Generate givens plane rotation: SROTG/DROTG
template <typename T>
StatusCode Rotg(CUdeviceptr sa_buffer, const size_t sa_offset,
CUdeviceptr sb_buffer, const size_t sb_offset,
CUdeviceptr sc_buffer, const size_t sc_offset,
CUdeviceptr ss_buffer, const size_t ss_offset,
const CUcontext context, const CUdevice device);
// Generate modified givens plane rotation: SROTMG/DROTMG
template <typename T>
StatusCode Rotmg(CUdeviceptr sd1_buffer, const size_t sd1_offset,
CUdeviceptr sd2_buffer, const size_t sd2_offset,
CUdeviceptr sx1_buffer, const size_t sx1_offset,
const CUdeviceptr sy1_buffer, const size_t sy1_offset,
CUdeviceptr sparam_buffer, const size_t sparam_offset,
const CUcontext context, const CUdevice device);
// Apply givens plane rotation: SROT/DROT
template <typename T>
StatusCode Rot(const size_t n,
CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const T cos,
const T sin,
const CUcontext context, const CUdevice device);
// Apply modified givens plane rotation: SROTM/DROTM
template <typename T>
StatusCode Rotm(const size_t n,
CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
CUdeviceptr sparam_buffer, const size_t sparam_offset,
const CUcontext context, const CUdevice device);
// Swap two vectors: SSWAP/DSWAP/CSWAP/ZSWAP/HSWAP
template <typename T>
StatusCode Swap(const size_t n,
CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const CUcontext context, const CUdevice device);
// Vector scaling: SSCAL/DSCAL/CSCAL/ZSCAL/HSCAL
template <typename T>
StatusCode Scal(const size_t n,
const T alpha,
CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUcontext context, const CUdevice device);
// Vector copy: SCOPY/DCOPY/CCOPY/ZCOPY/HCOPY
template <typename T>
StatusCode Copy(const size_t n,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const CUcontext context, const CUdevice device);
// Vector-times-constant plus vector: SAXPY/DAXPY/CAXPY/ZAXPY/HAXPY
template <typename T>
StatusCode Axpy(const size_t n,
const T alpha,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const CUcontext context, const CUdevice device);
// Dot product of two vectors: SDOT/DDOT/HDOT
template <typename T>
StatusCode Dot(const size_t n,
CUdeviceptr dot_buffer, const size_t dot_offset,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const CUcontext context, const CUdevice device);
// Dot product of two complex vectors: CDOTU/ZDOTU
template <typename T>
StatusCode Dotu(const size_t n,
CUdeviceptr dot_buffer, const size_t dot_offset,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const CUcontext context, const CUdevice device);
// Dot product of two complex vectors, one conjugated: CDOTC/ZDOTC
template <typename T>
StatusCode Dotc(const size_t n,
CUdeviceptr dot_buffer, const size_t dot_offset,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const CUcontext context, const CUdevice device);
// Euclidean norm of a vector: SNRM2/DNRM2/ScNRM2/DzNRM2/HNRM2
template <typename T>
StatusCode Nrm2(const size_t n,
CUdeviceptr nrm2_buffer, const size_t nrm2_offset,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUcontext context, const CUdevice device);
// Absolute sum of values in a vector: SASUM/DASUM/ScASUM/DzASUM/HASUM
template <typename T>
StatusCode Asum(const size_t n,
CUdeviceptr asum_buffer, const size_t asum_offset,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUcontext context, const CUdevice device);
// Sum of values in a vector (non-BLAS function): SSUM/DSUM/ScSUM/DzSUM/HSUM
template <typename T>
StatusCode Sum(const size_t n,
CUdeviceptr sum_buffer, const size_t sum_offset,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUcontext context, const CUdevice device);
// Index of absolute maximum value in a vector: iSAMAX/iDAMAX/iCAMAX/iZAMAX/iHAMAX
template <typename T>
StatusCode Amax(const size_t n,
CUdeviceptr imax_buffer, const size_t imax_offset,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUcontext context, const CUdevice device);
// Index of absolute minimum value in a vector (non-BLAS function): iSAMIN/iDAMIN/iCAMIN/iZAMIN/iHAMIN
template <typename T>
StatusCode Amin(const size_t n,
CUdeviceptr imin_buffer, const size_t imin_offset,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUcontext context, const CUdevice device);
// Index of maximum value in a vector (non-BLAS function): iSMAX/iDMAX/iCMAX/iZMAX/iHMAX
template <typename T>
StatusCode Max(const size_t n,
CUdeviceptr imax_buffer, const size_t imax_offset,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUcontext context, const CUdevice device);
// Index of minimum value in a vector (non-BLAS function): iSMIN/iDMIN/iCMIN/iZMIN/iHMIN
template <typename T>
StatusCode Min(const size_t n,
CUdeviceptr imin_buffer, const size_t imin_offset,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUcontext context, const CUdevice device);
// =================================================================================================
// BLAS level-2 (matrix-vector) routines
// =================================================================================================
// General matrix-vector multiplication: SGEMV/DGEMV/CGEMV/ZGEMV/HGEMV
template <typename T>
StatusCode Gemv(const Layout layout, const Transpose a_transpose,
const size_t m, const size_t n,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const T beta,
CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const CUcontext context, const CUdevice device);
// General banded matrix-vector multiplication: SGBMV/DGBMV/CGBMV/ZGBMV/HGBMV
template <typename T>
StatusCode Gbmv(const Layout layout, const Transpose a_transpose,
const size_t m, const size_t n, const size_t kl, const size_t ku,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const T beta,
CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const CUcontext context, const CUdevice device);
// Hermitian matrix-vector multiplication: CHEMV/ZHEMV
template <typename T>
StatusCode Hemv(const Layout layout, const Triangle triangle,
const size_t n,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const T beta,
CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const CUcontext context, const CUdevice device);
// Hermitian banded matrix-vector multiplication: CHBMV/ZHBMV
template <typename T>
StatusCode Hbmv(const Layout layout, const Triangle triangle,
const size_t n, const size_t k,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const T beta,
CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const CUcontext context, const CUdevice device);
// Hermitian packed matrix-vector multiplication: CHPMV/ZHPMV
template <typename T>
StatusCode Hpmv(const Layout layout, const Triangle triangle,
const size_t n,
const T alpha,
const CUdeviceptr ap_buffer, const size_t ap_offset,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const T beta,
CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const CUcontext context, const CUdevice device);
// Symmetric matrix-vector multiplication: SSYMV/DSYMV/HSYMV
template <typename T>
StatusCode Symv(const Layout layout, const Triangle triangle,
const size_t n,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const T beta,
CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const CUcontext context, const CUdevice device);
// Symmetric banded matrix-vector multiplication: SSBMV/DSBMV/HSBMV
template <typename T>
StatusCode Sbmv(const Layout layout, const Triangle triangle,
const size_t n, const size_t k,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const T beta,
CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const CUcontext context, const CUdevice device);
// Symmetric packed matrix-vector multiplication: SSPMV/DSPMV/HSPMV
template <typename T>
StatusCode Spmv(const Layout layout, const Triangle triangle,
const size_t n,
const T alpha,
const CUdeviceptr ap_buffer, const size_t ap_offset,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const T beta,
CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
const CUcontext context, const CUdevice device);
// Triangular matrix-vector multiplication: STRMV/DTRMV/CTRMV/ZTRMV/HTRMV
template <typename T>
StatusCode Trmv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
const size_t n,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUcontext context, const CUdevice device);
// Triangular banded matrix-vector multiplication: STBMV/DTBMV/CTBMV/ZTBMV/HTBMV
template <typename T>
StatusCode Tbmv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
const size_t n, const size_t k,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUcontext context, const CUdevice device);
// Triangular packed matrix-vector multiplication: STPMV/DTPMV/CTPMV/ZTPMV/HTPMV
template <typename T>
StatusCode Tpmv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
const size_t n,
const CUdeviceptr ap_buffer, const size_t ap_offset,
CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUcontext context, const CUdevice device);
// Solves a triangular system of equations: STRSV/DTRSV/CTRSV/ZTRSV
template <typename T>
StatusCode Trsv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
const size_t n,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUcontext context, const CUdevice device);
// Solves a banded triangular system of equations: STBSV/DTBSV/CTBSV/ZTBSV
template <typename T>
StatusCode Tbsv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
const size_t n, const size_t k,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUcontext context, const CUdevice device);
// Solves a packed triangular system of equations: STPSV/DTPSV/CTPSV/ZTPSV
template <typename T>
StatusCode Tpsv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
const size_t n,
const CUdeviceptr ap_buffer, const size_t ap_offset,
CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUcontext context, const CUdevice device);
// General rank-1 matrix update: SGER/DGER/HGER
template <typename T>
StatusCode Ger(const Layout layout,
const size_t m, const size_t n,
const T alpha,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUcontext context, const CUdevice device);
// General rank-1 complex matrix update: CGERU/ZGERU
template <typename T>
StatusCode Geru(const Layout layout,
const size_t m, const size_t n,
const T alpha,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUcontext context, const CUdevice device);
// General rank-1 complex conjugated matrix update: CGERC/ZGERC
template <typename T>
StatusCode Gerc(const Layout layout,
const size_t m, const size_t n,
const T alpha,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUcontext context, const CUdevice device);
// Hermitian rank-1 matrix update: CHER/ZHER
template <typename T>
StatusCode Her(const Layout layout, const Triangle triangle,
const size_t n,
const T alpha,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUcontext context, const CUdevice device);
// Hermitian packed rank-1 matrix update: CHPR/ZHPR
template <typename T>
StatusCode Hpr(const Layout layout, const Triangle triangle,
const size_t n,
const T alpha,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
CUdeviceptr ap_buffer, const size_t ap_offset,
const CUcontext context, const CUdevice device);
// Hermitian rank-2 matrix update: CHER2/ZHER2
template <typename T>
StatusCode Her2(const Layout layout, const Triangle triangle,
const size_t n,
const T alpha,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUcontext context, const CUdevice device);
// Hermitian packed rank-2 matrix update: CHPR2/ZHPR2
template <typename T>
StatusCode Hpr2(const Layout layout, const Triangle triangle,
const size_t n,
const T alpha,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
CUdeviceptr ap_buffer, const size_t ap_offset,
const CUcontext context, const CUdevice device);
// Symmetric rank-1 matrix update: SSYR/DSYR/HSYR
template <typename T>
StatusCode Syr(const Layout layout, const Triangle triangle,
const size_t n,
const T alpha,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUcontext context, const CUdevice device);
// Symmetric packed rank-1 matrix update: SSPR/DSPR/HSPR
template <typename T>
StatusCode Spr(const Layout layout, const Triangle triangle,
const size_t n,
const T alpha,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
CUdeviceptr ap_buffer, const size_t ap_offset,
const CUcontext context, const CUdevice device);
// Symmetric rank-2 matrix update: SSYR2/DSYR2/HSYR2
template <typename T>
StatusCode Syr2(const Layout layout, const Triangle triangle,
const size_t n,
const T alpha,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUcontext context, const CUdevice device);
// Symmetric packed rank-2 matrix update: SSPR2/DSPR2/HSPR2
template <typename T>
StatusCode Spr2(const Layout layout, const Triangle triangle,
const size_t n,
const T alpha,
const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
CUdeviceptr ap_buffer, const size_t ap_offset,
const CUcontext context, const CUdevice device);
// =================================================================================================
// BLAS level-3 (matrix-matrix) routines
// =================================================================================================
// General matrix-matrix multiplication: SGEMM/DGEMM/CGEMM/ZGEMM/HGEMM
template <typename T>
StatusCode Gemm(const Layout layout, const Transpose a_transpose, const Transpose b_transpose,
const size_t m, const size_t n, const size_t k,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
const T beta,
CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
const CUcontext context, const CUdevice device);
// Symmetric matrix-matrix multiplication: SSYMM/DSYMM/CSYMM/ZSYMM/HSYMM
template <typename T>
StatusCode Symm(const Layout layout, const Side side, const Triangle triangle,
const size_t m, const size_t n,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
const T beta,
CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
const CUcontext context, const CUdevice device);
// Hermitian matrix-matrix multiplication: CHEMM/ZHEMM
template <typename T>
StatusCode Hemm(const Layout layout, const Side side, const Triangle triangle,
const size_t m, const size_t n,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
const T beta,
CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
const CUcontext context, const CUdevice device);
// Rank-K update of a symmetric matrix: SSYRK/DSYRK/CSYRK/ZSYRK/HSYRK
template <typename T>
StatusCode Syrk(const Layout layout, const Triangle triangle, const Transpose a_transpose,
const size_t n, const size_t k,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const T beta,
CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
const CUcontext context, const CUdevice device);
// Rank-K update of a hermitian matrix: CHERK/ZHERK
template <typename T>
StatusCode Herk(const Layout layout, const Triangle triangle, const Transpose a_transpose,
const size_t n, const size_t k,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const T beta,
CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
const CUcontext context, const CUdevice device);
// Rank-2K update of a symmetric matrix: SSYR2K/DSYR2K/CSYR2K/ZSYR2K/HSYR2K
template <typename T>
StatusCode Syr2k(const Layout layout, const Triangle triangle, const Transpose ab_transpose,
const size_t n, const size_t k,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
const T beta,
CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
const CUcontext context, const CUdevice device);
// Rank-2K update of a hermitian matrix: CHER2K/ZHER2K
template <typename T, typename U>
StatusCode Her2k(const Layout layout, const Triangle triangle, const Transpose ab_transpose,
const size_t n, const size_t k,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
const U beta,
CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
const CUcontext context, const CUdevice device);
// Triangular matrix-matrix multiplication: STRMM/DTRMM/CTRMM/ZTRMM/HTRMM
template <typename T>
StatusCode Trmm(const Layout layout, const Side side, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
const size_t m, const size_t n,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
const CUcontext context, const CUdevice device);
// Solves a triangular system of equations: STRSM/DTRSM/CTRSM/ZTRSM
template <typename T>
StatusCode Trsm(const Layout layout, const Side side, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
const size_t m, const size_t n,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
const CUcontext context, const CUdevice device);
// =================================================================================================
// Extra non-BLAS routines (level-X)
// =================================================================================================
// Scaling and out-place transpose/copy (non-BLAS function): SOMATCOPY/DOMATCOPY/COMATCOPY/ZOMATCOPY/HOMATCOPY
template <typename T>
StatusCode Omatcopy(const Layout layout, const Transpose a_transpose,
const size_t m, const size_t n,
const T alpha,
const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
const CUcontext context, const CUdevice device);
// Im2col function (non-BLAS function): SIM2COL/DIM2COL/CIM2COL/ZIM2COL/HIM2COL
template <typename T>
StatusCode Im2col(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w,
const CUdeviceptr im_buffer, const size_t im_offset,
CUdeviceptr col_buffer, const size_t col_offset,
const CUcontext context, const CUdevice device);
// Batched version of AXPY: SAXPYBATCHED/DAXPYBATCHED/CAXPYBATCHED/ZAXPYBATCHED/HAXPYBATCHED
template <typename T>
StatusCode AxpyBatched(const size_t n,
const T *alphas,
const CUdeviceptr x_buffer, const size_t *x_offsets, const size_t x_inc,
CUdeviceptr y_buffer, const size_t *y_offsets, const size_t y_inc,
const size_t batch_count,
const CUcontext context, const CUdevice device);
// Batched version of GEMM: SGEMMBATCHED/DGEMMBATCHED/CGEMMBATCHED/ZGEMMBATCHED/HGEMMBATCHED
template <typename T>
StatusCode GemmBatched(const Layout layout, const Transpose a_transpose, const Transpose b_transpose,
const size_t m, const size_t n, const size_t k,
const T *alphas,
const CUdeviceptr a_buffer, const size_t *a_offsets, const size_t a_ld,
const CUdeviceptr b_buffer, const size_t *b_offsets, const size_t b_ld,
const T *betas,
CUdeviceptr c_buffer, const size_t *c_offsets, const size_t c_ld,
const size_t batch_count,
const CUcontext context, const CUdevice device);
// =================================================================================================
// CLBlast stores binaries of compiled kernels into a cache in case the same kernel is used later on
// for the same device. This cache can be cleared to free up system memory or in case of debugging.
StatusCode PUBLIC_API ClearCache();
// The cache can also be pre-initialized for a specific device with all possible CLBLast kernels.
// Further CLBlast routine calls will then run at maximum speed.
StatusCode PUBLIC_API FillCache(const CUdevice device);
// =================================================================================================
// Overrides tuning parameters for a specific device-precision-kernel combination. The next time
// the target routine is called it will re-compile and use the new parameters from then on.
StatusCode PUBLIC_API OverrideParameters(const CUdevice device, const std::string &kernel_name,
const Precision precision,
const std::unordered_map<std::string,size_t> &parameters);
// =================================================================================================
} // namespace clblast
#endif // CLBLAST_CLBLAST_CUDA_H_

View File

@ -18,13 +18,6 @@
#ifndef CLBLAST_HALF_H_
#define CLBLAST_HALF_H_
// Includes the normal OpenCL C header
#if defined(__APPLE__) || defined(__MACOSX)
#include <OpenCL/opencl.h>
#else
#include <CL/opencl.h>
#endif
// MSVC 2013 doesn't fully support C99
#ifdef _MSC_VER
#define inline __inline
@ -34,6 +27,7 @@
// The host data-type for half-precision floating-point (16-bit) is based on the `cl_half` OpenCL
// type, which is a typedef for unsigned short.
typedef unsigned short half;
// 32-bit union for conversions
typedef union ConversionBits_ {
@ -46,7 +40,7 @@ typedef union ConversionBits_ {
// Converts a IEEE-compliant single-precision value to half-precision floating-point. This function
// applies simple truncation (round toward zero, but with overflows set to infinity) as rounding
// mode.
inline cl_half FloatToHalf(const float value) {
inline half FloatToHalf(const float value) {
static const unsigned short base_table[512] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
@ -107,7 +101,7 @@ inline cl_half FloatToHalf(const float value) {
}
// Converts a half-precision value to IEEE-compliant single-precision floating-point
inline float HalfToFloat(const cl_half value) {
inline float HalfToFloat(const half value) {
static const unsigned int mantissa_table[2048] = {
0x00000000, 0x33800000, 0x34000000, 0x34400000, 0x34800000, 0x34A00000, 0x34C00000, 0x34E00000, 0x35000000, 0x35100000, 0x35200000, 0x35300000, 0x35400000, 0x35500000, 0x35600000, 0x35700000,
0x35800000, 0x35880000, 0x35900000, 0x35980000, 0x35A00000, 0x35A80000, 0x35B00000, 0x35B80000, 0x35C00000, 0x35C80000, 0x35D00000, 0x35D80000, 0x35E00000, 0x35E80000, 0x35F00000, 0x35F80000,

View File

@ -0,0 +1,88 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file demonstrates the use of the DAXPY routine with the C++ CUDA API of CLBlast.
//
// Note that this example is meant for illustration purposes only. CLBlast provides other programs
// for performance benchmarking ('client_xxxxx') and for correctness testing ('test_xxxxx').
//
// =================================================================================================
#include <cstdio>
#include <chrono>
#include <vector>
// Includes the CUDA driver API
#include <cuda.h>
// Includes the CLBlast library
#include <clblast_cuda.h>
// =================================================================================================
// Example use of the double-precision Xaxpy routine DAXPY
int main() {
// CUDA device selection
const auto device_id = 0;
// Example DAXPY arguments
const size_t n = 8192;
const double alpha = 0.7;
// Initializes the OpenCL device
cuInit(0);
CUdevice device;
cuDeviceGet(&device, device_id);
// Creates the OpenCL context and stream
CUcontext context;
cuCtxCreate(&context, 0, device);
CUstream stream;
cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING);
// Populate host matrices with some example data
auto host_a = std::vector<double>(n);
auto host_b = std::vector<double>(n);
for (auto &item: host_a) { item = 12.193; }
for (auto &item: host_b) { item = -8.199; }
// Copy the matrices to the device
CUdeviceptr device_a;
CUdeviceptr device_b;
cuMemAlloc(&device_a, host_a.size()*sizeof(double));
cuMemAlloc(&device_b, host_b.size()*sizeof(double));
cuMemcpyHtoDAsync(device_a, host_a.data(), host_a.size()*sizeof(double), stream);
cuMemcpyHtoDAsync(device_b, host_b.data(), host_b.size()*sizeof(double), stream);
// Start the timer
auto start_time = std::chrono::steady_clock::now();
// Call the DAXPY routine. Note that the type of alpha (double) determines the precision.
const auto status = clblast::Axpy(n, alpha,
device_a, 0, 1,
device_b, 0, 1,
context, device);
cuStreamSynchronize(stream);
// Record the execution time
auto elapsed_time = std::chrono::steady_clock::now() - start_time;
auto time_ms = std::chrono::duration<double,std::milli>(elapsed_time).count();
// Example completed. See "clblast_cuda.h" for status codes (0 -> success).
printf("Completed DAXPY in %.3lf ms with status %d\n", time_ms, static_cast<int>(status));
// Clean-up
cuMemFree(device_a);
cuMemFree(device_b);
cuStreamDestroy(stream);
return 0;
}
// =================================================================================================

View File

@ -0,0 +1,105 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file demonstrates the use of the SGEMM routine with the C++ CUDA API of CLBlast.
//
// Note that this example is meant for illustration purposes only. CLBlast provides other programs
// for performance benchmarking ('client_xxxxx') and for correctness testing ('test_xxxxx').
//
// =================================================================================================
#include <cstdio>
#include <chrono>
#include <vector>
// Includes the CUDA driver API
#include <cuda.h>
// Includes the CLBlast library
#include <clblast_cuda.h>
// =================================================================================================
// Example use of the single-precision Xgemm routine SGEMM
int main() {
// CUDA device selection
const auto device_id = 0;
// Example SGEMM arguments
const size_t m = 128;
const size_t n = 64;
const size_t k = 512;
const float alpha = 0.7f;
const float beta = 1.0f;
const auto a_ld = k;
const auto b_ld = n;
const auto c_ld = n;
// Initializes the OpenCL device
cuInit(0);
CUdevice device;
cuDeviceGet(&device, device_id);
// Creates the OpenCL context and stream
CUcontext context;
cuCtxCreate(&context, 0, device);
CUstream stream;
cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING);
// Populate host matrices with some example data
auto host_a = std::vector<float>(m*k);
auto host_b = std::vector<float>(n*k);
auto host_c = std::vector<float>(m*n);
for (auto &item: host_a) { item = 12.193f; }
for (auto &item: host_b) { item = -8.199f; }
for (auto &item: host_c) { item = 0.0f; }
// Copy the matrices to the device
CUdeviceptr device_a;
CUdeviceptr device_b;
CUdeviceptr device_c;
cuMemAlloc(&device_a, host_a.size()*sizeof(float));
cuMemAlloc(&device_b, host_b.size()*sizeof(float));
cuMemAlloc(&device_c, host_c.size()*sizeof(float));
cuMemcpyHtoDAsync(device_a, host_a.data(), host_a.size()*sizeof(float), stream);
cuMemcpyHtoDAsync(device_b, host_b.data(), host_b.size()*sizeof(float), stream);
cuMemcpyHtoDAsync(device_c, host_c.data(), host_c.size()*sizeof(float), stream);
// Start the timer
auto start_time = std::chrono::steady_clock::now();
// Call the SGEMM routine. Note that the type of alpha and beta (float) determine the precision.
auto status = clblast::Gemm(clblast::Layout::kRowMajor,
clblast::Transpose::kNo, clblast::Transpose::kNo,
m, n, k,
alpha,
device_a, 0, a_ld,
device_b, 0, b_ld,
beta,
device_c, 0, c_ld,
context, device);
cuStreamSynchronize(stream);
// Record the execution time
auto elapsed_time = std::chrono::steady_clock::now() - start_time;
auto time_ms = std::chrono::duration<double,std::milli>(elapsed_time).count();
// Example completed. See "clblast_cuda.h" for status codes (0 -> success).
printf("Completed SGEMM in %.3lf ms with status %d\n", time_ms, static_cast<int>(status));
// Clean-up
cuMemFree(device_a);
cuMemFree(device_b);
cuMemFree(device_c);
cuStreamDestroy(stream);
return 0;
}
// =================================================================================================

View File

@ -12,6 +12,8 @@
# clblast.cpp
# clblast_c.h
# clblast_c.cpp
# clblast_cuda.h
# clblast_cuda.cpp
# clblast_netlib_c.h
# clblast_netlib_c.cpp
# wrapper_clblas.h
@ -41,9 +43,11 @@ FILES = [
"/test/wrapper_cublas.hpp",
"/include/clblast_netlib_c.h",
"/src/clblast_netlib_c.cpp",
"/include/clblast_cuda.h",
"/src/clblast_cuda.cpp",
]
HEADER_LINES = [122, 79, 126, 24, 29, 41, 29, 65, 32]
FOOTER_LINES = [25, 147, 27, 38, 6, 6, 6, 9, 2]
HEADER_LINES = [122, 21, 126, 24, 29, 41, 29, 65, 32, 94, 21]
FOOTER_LINES = [25, 3, 27, 38, 6, 6, 6, 9, 2, 25, 3]
HEADER_LINES_DOC = 0
FOOTER_LINES_DOC = 63
@ -224,6 +228,10 @@ def main(argv):
if i == 8:
if not routine.batched:
body += cpp.clblast_netlib_c_cc(routine)
if i == 9:
body += cpp.clblast_h(routine, cuda=True)
if i == 10:
body += cpp.clblast_cc(routine, cuda=True)
f.write("".join(file_header))
f.write(body)
f.write("".join(file_footer))

View File

@ -36,22 +36,28 @@ HEADER = NL + SEPARATOR + """
""" + SEPARATOR + NL
def clblast_h(routine):
def clblast_h(routine, cuda=False):
"""The C++ API header (.h)"""
result = NL + "// " + routine.description + ": " + routine.short_names() + NL
result += routine.routine_header_cpp(12, " = nullptr") + ";" + NL
result += routine.routine_header_cpp(12, " = nullptr", cuda) + ";" + NL
return result
def clblast_cc(routine):
def clblast_cc(routine, cuda=False):
"""The C++ API implementation (.cpp)"""
indent1 = " " * (15 + routine.length())
result = NL + "// " + routine.description + ": " + routine.short_names() + NL
if routine.implemented:
result += routine.routine_header_cpp(12, "") + " {" + NL
result += routine.routine_header_cpp(12, "", cuda) + " {" + NL
result += " try {" + NL
result += " auto queue_cpp = Queue(*queue);" + NL
result += " auto routine = X" + routine.plain_name() + "<" + routine.template.template + ">(queue_cpp, event);" + NL
if cuda:
result += " const auto context_cpp = Context(context);" + NL
result += " const auto device_cpp = Device(device);" + NL
result += " auto queue_cpp = Queue(context_cpp, device_cpp);" + NL
else:
result += " auto queue_cpp = Queue(*queue);" + NL
event = "nullptr" if cuda else "event"
result += " auto routine = X" + routine.plain_name() + "<" + routine.template.template + ">(queue_cpp, " + event + ");" + NL
if routine.batched:
result += " " + (NL + " ").join(routine.batched_transform_to_cpp()) + NL
result += " routine.Do" + routine.capitalized_name() + "("
@ -60,14 +66,22 @@ def clblast_cc(routine):
result += " return StatusCode::kSuccess;" + NL
result += " } catch (...) { return DispatchException(); }" + NL
else:
result += routine.routine_header_type_cpp(12) + " {" + NL
result += routine.routine_header_type_cpp(12, cuda) + " {" + NL
result += " return StatusCode::kNotImplemented;" + NL
result += "}" + NL
for flavour in routine.flavours:
indent2 = " " * (34 + routine.length() + len(flavour.template))
result += "template StatusCode PUBLIC_API " + routine.capitalized_name() + "<" + flavour.template + ">("
result += ("," + NL + indent2).join([a for a in routine.arguments_type(flavour)])
result += "," + NL + indent2 + "cl_command_queue*, cl_event*);" + NL
arguments = routine.arguments_type(flavour)
if cuda:
arguments = [a.replace("cl_mem", "CUdeviceptr") for a in arguments]
result += ("," + NL + indent2).join([a for a in arguments])
result += "," + NL + indent2
if cuda:
result += "const CUcontext, const CUdevice"
else:
result += "cl_command_queue*, cl_event*"
result += ");" + NL
return result
@ -364,7 +378,9 @@ def performance_test(routine, level_string):
found = False
for flavour in routine.flavours:
if flavour.precision_name == precision:
result += NL + " clblast::RunClient<clblast::TestX" + routine.plain_name() + flavour.test_template()
extra_template_argument = "0, " if routine.name == "gemm" and not routine.batched else ""
result += NL + " clblast::RunClient<clblast::TestX" + routine.plain_name()
result += flavour.test_template(extra_template_argument)
result += ">(argc, argv); break;" + NL
found = True
if not found:
@ -384,10 +400,13 @@ def correctness_test(routine, level_string):
result += "int main(int argc, char *argv[]) {" + NL
result += " auto errors = size_t{0};" + NL
not_first = "false"
for flavour in routine.flavours:
result += " errors += clblast::RunTests<clblast::TestX" + routine.plain_name() + flavour.test_template()
result += ">(argc, argv, " + not_first + ", \"" + flavour.name + routine.upper_name() + "\");" + NL
not_first = "true"
extra_template_arguments = ["1, ", "2, "] if routine.name == "gemm" and not routine.batched else [""]
for extra_template_argument in extra_template_arguments:
for flavour in routine.flavours:
result += " errors += clblast::RunTests<clblast::TestX" + routine.plain_name()
result += flavour.test_template(extra_template_argument)
result += ">(argc, argv, " + not_first + ", \"" + flavour.name + routine.upper_name() + "\");" + NL
not_first = "true"
result += " if (errors > 0) { return 1; } else { return 0; }" + NL
result += "}" + NL
return result

View File

@ -70,13 +70,13 @@ class DataType:
return self.beta_cpp + "{reinterpret_cast<const double*>(beta)[0], reinterpret_cast<const double*>(beta)[1]}"
return "beta"
def test_template(self):
def test_template(self, extra_template_argument):
"""Returns the template as used in the correctness/performance tests"""
buffer_type = "clblast::" + self.buffer_type if self.is_non_standard() else self.buffer_type
beta_cpp = "clblast::" + self.beta_cpp if self.beta_cpp in [D_HALF, D_FLOAT2, D_DOUBLE2] else self.beta_cpp
if self.buffer_type != self.beta_cpp:
return "<" + buffer_type + "," + self.beta_cpp + ">, " + buffer_type + ", " + beta_cpp
return "<" + buffer_type + ">, " + buffer_type + ", " + beta_cpp
return "<" + extra_template_argument + buffer_type + "," + self.beta_cpp + ">, " + buffer_type + ", " + beta_cpp
return "<" + extra_template_argument + buffer_type + ">, " + buffer_type + ", " + beta_cpp
def is_complex(self, scalar):
"""Current scalar is complex"""

View File

@ -802,22 +802,38 @@ class Routine:
"""Retrieves a list of routine requirements for documentation"""
return self.requirements
def routine_header_cpp(self, spaces, default_event):
def routine_header_cpp(self, spaces, default_event, cuda=False):
"""Retrieves the C++ templated definition for a routine"""
indent = " " * (spaces + self.length())
arguments = self.arguments_def(self.template)
if cuda:
arguments = [a.replace("cl_mem", "CUdeviceptr") for a in arguments]
result = "template <" + self.template.name + ">\n"
result += "StatusCode " + self.capitalized_name() + "("
result += (",\n" + indent).join([a for a in self.arguments_def(self.template)])
result += ",\n" + indent + "cl_command_queue* queue, cl_event* event" + default_event + ")"
result += (",\n" + indent).join([a for a in arguments])
result += ",\n" + indent
if cuda:
result += "const CUcontext context, const CUdevice device"
else:
result += "cl_command_queue* queue, cl_event* event" + default_event
result += ")"
return result
def routine_header_type_cpp(self, spaces):
def routine_header_type_cpp(self, spaces, cuda=False):
"""As above, but now without variable names"""
indent = " " * (spaces + self.length())
arguments = self.arguments_type(self.template)
if cuda:
arguments = [a.replace("cl_mem", "CUdeviceptr") for a in arguments]
result = "template <" + self.template.name + ">\n"
result += "StatusCode " + self.capitalized_name() + "("
result += (",\n" + indent).join([a for a in self.arguments_type(self.template)])
result += ",\n" + indent + "cl_command_queue*, cl_event*)"
result += (",\n" + indent).join([a for a in arguments])
result += ",\n" + indent
if cuda:
result += "const CUcontext, const CUdevice"
else:
result += "cl_command_queue*, cl_event*"
result += ")"
return result
def routine_header_c(self, flavour, spaces, extra_qualifier):

169
src/api_common.cpp 100644
View File

@ -0,0 +1,169 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements the common (non-OpenCL-specific) functions of the CLBlast API.
//
// =================================================================================================
#include <string>
#include "utilities/utilities.hpp"
#include "cache.hpp"
#include "routines/routines.hpp"
namespace clblast {
// =================================================================================================
// Clears the cache of stored binaries
// Empties both kernel caches (device binaries and compiled program objects), so the next routine
// call on any device triggers a fresh compilation. Any internal error is mapped to a StatusCode.
StatusCode ClearCache() {
  try {
    BinaryCache::Instance().Invalidate();
    ProgramCache::Instance().Invalidate();
  }
  catch (...) {
    return DispatchException();
  }
  return StatusCode::kSuccess;
}
// Compiles (and thereby caches) the CLBlast kernels for one real/complex precision pair.
// Constructing each routine object compiles its kernels, populating the caches for the device
// and context behind 'queue'. The 'nullptr' argument means no profiling event is attached.
// If the device lacks this precision, the kNoDoublePrecision/kNoHalfPrecision error is
// swallowed on purpose (best-effort warm-up); any other error is re-thrown to the caller.
// Fixed: the Xswap set-up line was duplicated, compiling the swap kernels twice.
// NOTE(review): Xamin, Xtrsv and Xtrsm are included by routines.hpp but not warmed up here —
// confirm whether that omission is intentional.
template <typename Real, typename Complex>
void FillCacheForPrecision(Queue &queue) {
  try {

    // Runs all the level 1 set-up functions
    Xswap<Real>(queue, nullptr); Xswap<Complex>(queue, nullptr);
    Xscal<Real>(queue, nullptr); Xscal<Complex>(queue, nullptr);
    Xcopy<Real>(queue, nullptr); Xcopy<Complex>(queue, nullptr);
    Xaxpy<Real>(queue, nullptr); Xaxpy<Complex>(queue, nullptr);
    Xdot<Real>(queue, nullptr);
    Xdotu<Complex>(queue, nullptr);
    Xdotc<Complex>(queue, nullptr);
    Xnrm2<Real>(queue, nullptr); Xnrm2<Complex>(queue, nullptr);
    Xasum<Real>(queue, nullptr); Xasum<Complex>(queue, nullptr);
    Xsum<Real>(queue, nullptr); Xsum<Complex>(queue, nullptr);
    Xamax<Real>(queue, nullptr); Xamax<Complex>(queue, nullptr);
    Xmax<Real>(queue, nullptr); Xmax<Complex>(queue, nullptr);
    Xmin<Real>(queue, nullptr); Xmin<Complex>(queue, nullptr);

    // Runs all the level 2 set-up functions
    Xgemv<Real>(queue, nullptr); Xgemv<Complex>(queue, nullptr);
    Xgbmv<Real>(queue, nullptr); Xgbmv<Complex>(queue, nullptr);
    Xhemv<Complex>(queue, nullptr);
    Xhbmv<Complex>(queue, nullptr);
    Xhpmv<Complex>(queue, nullptr);
    Xsymv<Real>(queue, nullptr);
    Xsbmv<Real>(queue, nullptr);
    Xspmv<Real>(queue, nullptr);
    Xtrmv<Real>(queue, nullptr); Xtrmv<Complex>(queue, nullptr);
    Xtbmv<Real>(queue, nullptr); Xtbmv<Complex>(queue, nullptr);
    Xtpmv<Real>(queue, nullptr); Xtpmv<Complex>(queue, nullptr);
    Xger<Real>(queue, nullptr);
    Xgeru<Complex>(queue, nullptr);
    Xgerc<Complex>(queue, nullptr);
    Xher<Complex,Real>(queue, nullptr);
    Xhpr<Complex,Real>(queue, nullptr);
    Xher2<Complex>(queue, nullptr);
    Xhpr2<Complex>(queue, nullptr);
    Xsyr<Real>(queue, nullptr);
    Xspr<Real>(queue, nullptr);
    Xsyr2<Real>(queue, nullptr);
    Xspr2<Real>(queue, nullptr);

    // Runs all the level 3 set-up functions
    Xgemm<Real>(queue, nullptr); Xgemm<Complex>(queue, nullptr);
    Xsymm<Real>(queue, nullptr); Xsymm<Complex>(queue, nullptr);
    Xhemm<Complex>(queue, nullptr);
    Xsyrk<Real>(queue, nullptr); Xsyrk<Complex>(queue, nullptr);
    Xherk<Complex,Real>(queue, nullptr);
    Xsyr2k<Real>(queue, nullptr); Xsyr2k<Complex>(queue, nullptr);
    Xher2k<Complex,Real>(queue, nullptr);
    Xtrmm<Real>(queue, nullptr); Xtrmm<Complex>(queue, nullptr);

    // Runs all the non-BLAS set-up functions
    Xomatcopy<Real>(queue, nullptr); Xomatcopy<Complex>(queue, nullptr);

  } catch(const RuntimeErrorCode &e) {
    // Unsupported precisions are expected on some devices and are silently skipped
    if (e.status() != StatusCode::kNoDoublePrecision &&
        e.status() != StatusCode::kNoHalfPrecision) {
      throw;
    }
  }
}
// Fills the cache with all binaries for a specific device
// TODO: Add half-precision FP16 set-up calls
StatusCode FillCache(const RawDeviceID device) {
  try {
    // Builds a throw-away device/context/queue triple so the warm-up mimics a regular routine call
    const auto target_device = Device(device);
    const auto target_context = Context(target_device);
    auto warmup_queue = Queue(target_context, target_device);

    // Compiles every kernel for both supported precision pairs (single and double)
    FillCacheForPrecision<float, float2>(warmup_queue);
    FillCacheForPrecision<double, double2>(warmup_queue);
  }
  catch (...) {
    return DispatchException();
  }
  return StatusCode::kSuccess;
}
// =================================================================================================
// Overrides the tuning parameters for this device-precision-kernel combination
// The caller must supply a value for *every* parameter of the kernel's existing database entry;
// a missing or extra parameter yields kMissingOverrideParameter. On success the old database
// entry is replaced in the cache, so the next call to the affected routine re-compiles with the
// new parameters. Fixed: removed the unused 'device_name' local (GetDeviceName result was never
// read).
StatusCode OverrideParameters(const RawDeviceID device, const std::string &kernel_name,
                              const Precision precision,
                              const std::unordered_map<std::string,size_t> &parameters) {
  try {

    // Retrieves the device and its platform, which together key the database cache
    const auto device_cpp = Device(device);
    const auto platform_id = device_cpp.PlatformID();

    // Retrieves the current database values to verify whether the new ones are complete
    auto in_cache = false;
    auto current_database = DatabaseCache::Instance().Get(DatabaseKeyRef{platform_id, device, precision, kernel_name}, &in_cache);
    if (!in_cache) {
      log_debug("Searching database for kernel '" + kernel_name + "'");
      current_database = Database(device_cpp, kernel_name, precision, {});
    }

    // Verifies the parameters size: the override must cover every existing parameter
    const auto current_parameter_names = current_database.GetParameterNames();
    if (current_parameter_names.size() != parameters.size()) {
      return StatusCode::kMissingOverrideParameter;
    }

    // Retrieves the names and values separately and in the same order as the existing database
    auto parameter_values = database::Params{0};
    auto i = size_t{0};
    for (const auto &current_param : current_parameter_names) {
      if (parameters.find(current_param) == parameters.end()) {
        return StatusCode::kMissingOverrideParameter;
      }
      const auto parameter_value = parameters.at(current_param);
      parameter_values[i] = parameter_value;
      ++i;
    }

    // Creates a small custom database based on the provided parameters, using catch-all
    // vendor/architecture/device entries so it always matches this device
    const auto database_device = database::DatabaseDevice{database::kDeviceNameDefault, parameter_values};
    const auto database_architecture = database::DatabaseArchitecture{"default", {database_device}};
    const auto database_vendor = database::DatabaseVendor{database::kDeviceTypeAll, "default", {database_architecture}};
    const auto database_entry = database::DatabaseEntry{kernel_name, precision, current_parameter_names, {database_vendor}};
    const auto database_entries = std::vector<database::DatabaseEntry>{database_entry};
    const auto database = Database(device_cpp, kernel_name, precision, database_entries);

    // Removes the old database entry and stores the new one in the cache
    DatabaseCache::Instance().Remove(DatabaseKey{platform_id, device, precision, kernel_name});
    DatabaseCache::Instance().Store(DatabaseKey{platform_id, device, precision, kernel_name}, Database(database));

  } catch (...) { return DispatchException(); }
  return StatusCode::kSuccess;
}
// =================================================================================================
} // namespace clblast

View File

@ -15,67 +15,9 @@
#include <string>
#include "cache.hpp"
#include "routines/routines.hpp"
#include "clblast.h"
// BLAS level-1 includes
#include "routines/level1/xswap.hpp"
#include "routines/level1/xscal.hpp"
#include "routines/level1/xcopy.hpp"
#include "routines/level1/xaxpy.hpp"
#include "routines/level1/xdot.hpp"
#include "routines/level1/xdotu.hpp"
#include "routines/level1/xdotc.hpp"
#include "routines/level1/xnrm2.hpp"
#include "routines/level1/xasum.hpp"
#include "routines/level1/xsum.hpp" // non-BLAS routine
#include "routines/level1/xamax.hpp"
#include "routines/level1/xamin.hpp" // non-BLAS routine
#include "routines/level1/xmax.hpp" // non-BLAS routine
#include "routines/level1/xmin.hpp" // non-BLAS routine
// BLAS level-2 includes
#include "routines/level2/xgemv.hpp"
#include "routines/level2/xgbmv.hpp"
#include "routines/level2/xhemv.hpp"
#include "routines/level2/xhbmv.hpp"
#include "routines/level2/xhpmv.hpp"
#include "routines/level2/xsymv.hpp"
#include "routines/level2/xsbmv.hpp"
#include "routines/level2/xspmv.hpp"
#include "routines/level2/xtrmv.hpp"
#include "routines/level2/xtbmv.hpp"
#include "routines/level2/xtpmv.hpp"
#include "routines/level2/xtrsv.hpp"
#include "routines/level2/xger.hpp"
#include "routines/level2/xgeru.hpp"
#include "routines/level2/xgerc.hpp"
#include "routines/level2/xher.hpp"
#include "routines/level2/xhpr.hpp"
#include "routines/level2/xher2.hpp"
#include "routines/level2/xhpr2.hpp"
#include "routines/level2/xsyr.hpp"
#include "routines/level2/xspr.hpp"
#include "routines/level2/xsyr2.hpp"
#include "routines/level2/xspr2.hpp"
// BLAS level-3 includes
#include "routines/level3/xgemm.hpp"
#include "routines/level3/xsymm.hpp"
#include "routines/level3/xhemm.hpp"
#include "routines/level3/xsyrk.hpp"
#include "routines/level3/xherk.hpp"
#include "routines/level3/xsyr2k.hpp"
#include "routines/level3/xher2k.hpp"
#include "routines/level3/xtrmm.hpp"
#include "routines/level3/xtrsm.hpp"
// Level-x includes (non-BLAS)
#include "routines/levelx/xomatcopy.hpp"
#include "routines/levelx/xim2col.hpp"
#include "routines/levelx/xaxpybatched.hpp"
#include "routines/levelx/xgemmbatched.hpp"
namespace clblast {
// =================================================================================================
@ -2389,153 +2331,6 @@ template StatusCode PUBLIC_API GemmBatched<half>(const Layout, const Transpose,
cl_mem, const size_t*, const size_t,
const size_t,
cl_command_queue*, cl_event*);
// =================================================================================================
// Clears the cache of stored binaries and compiled programs
StatusCode ClearCache() {
  try {
    // Drops both cache levels: compiled program objects and their device binaries
    ProgramCache::Instance().Invalidate();
    BinaryCache::Instance().Invalidate();
  }
  catch (...) { return DispatchException(); }
  return StatusCode::kSuccess;
}
// Compiles and caches the kernels of all routines for one precision pair: 'Real' for the
// real-valued routines and 'Complex' for the complex-valued ones. Constructing a routine object
// is enough to trigger compilation and caching of its kernels; the event pointer argument is not
// needed here, hence nullptr. Exceptions for unsupported precisions (FP64/FP16) are swallowed on
// purpose: the cache is then simply filled with whatever the device does support.
template <typename Real, typename Complex>
void FillCacheForPrecision(Queue &queue) {
  try {

    // Runs all the level 1 set-up functions
    Xswap<Real>(queue, nullptr); Xswap<Complex>(queue, nullptr); // fix: was accidentally run twice
    Xscal<Real>(queue, nullptr); Xscal<Complex>(queue, nullptr);
    Xcopy<Real>(queue, nullptr); Xcopy<Complex>(queue, nullptr);
    Xaxpy<Real>(queue, nullptr); Xaxpy<Complex>(queue, nullptr);
    Xdot<Real>(queue, nullptr);
    Xdotu<Complex>(queue, nullptr);
    Xdotc<Complex>(queue, nullptr);
    Xnrm2<Real>(queue, nullptr); Xnrm2<Complex>(queue, nullptr);
    Xasum<Real>(queue, nullptr); Xasum<Complex>(queue, nullptr);
    Xsum<Real>(queue, nullptr); Xsum<Complex>(queue, nullptr);
    Xamax<Real>(queue, nullptr); Xamax<Complex>(queue, nullptr);
    Xmax<Real>(queue, nullptr); Xmax<Complex>(queue, nullptr);
    Xmin<Real>(queue, nullptr); Xmin<Complex>(queue, nullptr);

    // Runs all the level 2 set-up functions
    Xgemv<Real>(queue, nullptr); Xgemv<Complex>(queue, nullptr);
    Xgbmv<Real>(queue, nullptr); Xgbmv<Complex>(queue, nullptr);
    Xhemv<Complex>(queue, nullptr);
    Xhbmv<Complex>(queue, nullptr);
    Xhpmv<Complex>(queue, nullptr);
    Xsymv<Real>(queue, nullptr);
    Xsbmv<Real>(queue, nullptr);
    Xspmv<Real>(queue, nullptr);
    Xtrmv<Real>(queue, nullptr); Xtrmv<Complex>(queue, nullptr);
    Xtbmv<Real>(queue, nullptr); Xtbmv<Complex>(queue, nullptr);
    Xtpmv<Real>(queue, nullptr); Xtpmv<Complex>(queue, nullptr);
    Xger<Real>(queue, nullptr);
    Xgeru<Complex>(queue, nullptr);
    Xgerc<Complex>(queue, nullptr);
    Xher<Complex,Real>(queue, nullptr);
    Xhpr<Complex,Real>(queue, nullptr);
    Xher2<Complex>(queue, nullptr);
    Xhpr2<Complex>(queue, nullptr);
    Xsyr<Real>(queue, nullptr);
    Xspr<Real>(queue, nullptr);
    Xsyr2<Real>(queue, nullptr);
    Xspr2<Real>(queue, nullptr);

    // Runs all the level 3 set-up functions
    // NOTE(review): Xtrsv/Xtrsm and the batched routines are not pre-compiled here although their
    // headers are included - confirm whether that omission is intentional
    Xgemm<Real>(queue, nullptr); Xgemm<Complex>(queue, nullptr);
    Xsymm<Real>(queue, nullptr); Xsymm<Complex>(queue, nullptr);
    Xhemm<Complex>(queue, nullptr);
    Xsyrk<Real>(queue, nullptr); Xsyrk<Complex>(queue, nullptr);
    Xherk<Complex,Real>(queue, nullptr);
    Xsyr2k<Real>(queue, nullptr); Xsyr2k<Complex>(queue, nullptr);
    Xher2k<Complex,Real>(queue, nullptr);
    Xtrmm<Real>(queue, nullptr); Xtrmm<Complex>(queue, nullptr);

    // Runs all the non-BLAS set-up functions
    Xomatcopy<Real>(queue, nullptr); Xomatcopy<Complex>(queue, nullptr);

  } catch(const RuntimeErrorCode &e) {
    // Only precision-not-supported errors are ignored; anything else is re-thrown
    if (e.status() != StatusCode::kNoDoublePrecision &&
        e.status() != StatusCode::kNoHalfPrecision) {
      throw;
    }
  }
}
// Pre-compiles all binaries for a specific device and stores them in the cache
// TODO: Add half-precision FP16 set-up calls
StatusCode FillCache(const cl_device_id device) {
  try {
    // Sets up a plain context and queue on this device, matching the normal routine
    // calling conventions
    const auto device_cpp = Device(device);
    const auto context = Context(device_cpp);
    auto queue = Queue(context, device_cpp);
    // Compiles the kernels for both single and double precision (real and complex)
    FillCacheForPrecision<float, float2>(queue);
    FillCacheForPrecision<double, double2>(queue);
  }
  catch (...) { return DispatchException(); }
  return StatusCode::kSuccess;
}
// =================================================================================================
// Overrides the tuning parameters for this device-precision-kernel combination.
// Succeeds only when 'parameters' supplies exactly the same set of parameter names as the
// existing database entry for this kernel; otherwise kMissingOverrideParameter is returned.
StatusCode OverrideParameters(const cl_device_id device, const std::string &kernel_name,
                              const Precision precision,
                              const std::unordered_map<std::string,size_t> &parameters) {
  try {
    // Retrieves the device identifiers
    // (fix: removed the unused 'device_name' local previously fetched via GetDeviceName)
    const auto device_cpp = Device(device);
    const auto platform_id = device_cpp.PlatformID();

    // Retrieves the current database values to verify whether the new ones are complete
    auto in_cache = false;
    auto current_database = DatabaseCache::Instance().Get(DatabaseKeyRef{platform_id, device, precision, kernel_name}, &in_cache);
    if (!in_cache) {
      log_debug("Searching database for kernel '" + kernel_name + "'");
      current_database = Database(device_cpp, kernel_name, precision, {});
    }

    // Verifies the parameters size: the override must cover every parameter, no more, no less
    const auto current_parameter_names = current_database.GetParameterNames();
    if (current_parameter_names.size() != parameters.size()) {
      return StatusCode::kMissingOverrideParameter;
    }

    // Retrieves the values in the same order as the existing database's parameter names
    auto parameter_values = database::Params{0};
    auto i = size_t{0};
    for (const auto &current_param : current_parameter_names) {
      if (parameters.find(current_param) == parameters.end()) {
        return StatusCode::kMissingOverrideParameter;
      }
      const auto parameter_value = parameters.at(current_param);
      parameter_values[i] = parameter_value;
      ++i;
    }

    // Creates a small custom database based on the provided parameters, matching any device
    const auto database_device = database::DatabaseDevice{database::kDeviceNameDefault, parameter_values};
    const auto database_architecture = database::DatabaseArchitecture{"default", {database_device}};
    const auto database_vendor = database::DatabaseVendor{database::kDeviceTypeAll, "default", {database_architecture}};
    const auto database_entry = database::DatabaseEntry{kernel_name, precision, current_parameter_names, {database_vendor}};
    const auto database_entries = std::vector<database::DatabaseEntry>{database_entry};
    const auto database = Database(device_cpp, kernel_name, precision, database_entries);

    // Removes the old database entry and stores the new one in the cache
    DatabaseCache::Instance().Remove(DatabaseKey{platform_id, device, precision, kernel_name});
    DatabaseCache::Instance().Store(DatabaseKey{platform_id, device, precision, kernel_name}, Database(database));
  } catch (...) { return DispatchException(); }
  return StatusCode::kSuccess;
}
// =================================================================================================
} // namespace clblast

2436
src/clblast_cuda.cpp 100644

File diff suppressed because it is too large Load Diff

View File

@ -668,6 +668,9 @@ class Buffer {
// Copies from host to device: writing the device buffer a-synchronously
void WriteAsync(const Queue &queue, const size_t size, const T* host, const size_t offset = 0) {
if (access_ == BufferAccess::kReadOnly) {
throw LogicError("Buffer: writing to a read-only buffer");
}
if (GetSize() < (offset+size)*sizeof(T)) {
throw LogicError("Buffer: target device buffer is too small");
}

782
src/cupp11.hpp 100644
View File

@ -0,0 +1,782 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements a bunch of C++11 classes that act as wrappers around OpenCL objects and API
// calls. The main benefits are increased abstraction, automatic memory management, and portability.
// Portability here means that a similar header exists for CUDA with the same classes and
// interfaces. In other words, moving from the OpenCL API to the CUDA API becomes a one-line change.
//
// This file is taken from the CLCudaAPI project <https://github.com/CNugteren/CLCudaAPI> and
// therefore contains the following header copyright notice:
//
// =================================================================================================
//
// Copyright 2015 SURFsara
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// =================================================================================================
#ifndef CLBLAST_CUPP11_H_
#define CLBLAST_CUPP11_H_
// C++
#include <algorithm> // std::copy
#include <string> // std::string
#include <vector> // std::vector
#include <memory> // std::shared_ptr
#include <cstring> // std::strlen
// CUDA
#define CUDA_NO_HALF // Incompatible with CLBlast's definition; TODO: resolve this
#include <cuda.h> // CUDA driver API
#include <nvrtc.h> // NVIDIA runtime compilation API
// Exception classes
#include "cxpp11_common.hpp"
namespace clblast {
// =================================================================================================
// Max-length of strings
constexpr auto kStringLength = 256;
// =================================================================================================
// Wraps a CUresult failure from a CUDA driver API call as a throwable error type
class CLCudaAPIError : public ErrorCode<DeviceError, CUresult> {
 public:
  explicit CLCudaAPIError(CUresult status, const std::string &where):
      ErrorCode(status, where, "CUDA error: " + where + ": " +
                GetErrorName(status) + " --> " + GetErrorString(status)) {
  }

  // Throws on any non-success status
  static void Check(const CUresult status, const std::string &where) {
    if (status == CUDA_SUCCESS) { return; }
    throw CLCudaAPIError(status, where);
  }

  // Destructor-safe variant: reports the error on stderr instead of throwing
  static void CheckDtor(const CUresult status, const std::string &where) {
    if (status == CUDA_SUCCESS) { return; }
    fprintf(stderr, "CLCudaAPI: %s (ignoring)\n", CLCudaAPIError(status, where).what());
  }

 private:
  // Short symbolic name of the status code (e.g. "CUDA_ERROR_INVALID_VALUE")
  std::string GetErrorName(CUresult status) const {
    const char* name = nullptr;
    cuGetErrorName(status, &name);
    return std::string(name);
  }
  // Human-readable description of the status code
  std::string GetErrorString(CUresult status) const {
    const char* description = nullptr;
    cuGetErrorString(status, &description);
    return std::string(description);
  }
};
// Wraps an nvrtcResult failure from the CUDA runtime compilation API as a throwable error type
class CLCudaAPINVRTCError : public ErrorCode<DeviceError, nvrtcResult> {
 public:
  explicit CLCudaAPINVRTCError(nvrtcResult status, const std::string &where):
      ErrorCode(status, where, "CUDA NVRTC error: " + where + ": " + GetErrorString(status)) {
  }

  // Throws on any non-success status
  static void Check(const nvrtcResult status, const std::string &where) {
    if (status == NVRTC_SUCCESS) { return; }
    throw CLCudaAPINVRTCError(status, where);
  }

  // Destructor-safe variant: reports the error on stderr instead of throwing
  static void CheckDtor(const nvrtcResult status, const std::string &where) {
    if (status == NVRTC_SUCCESS) { return; }
    fprintf(stderr, "CLCudaAPI: %s (ignoring)\n", CLCudaAPINVRTCError(status, where).what());
  }

 private:
  // Human-readable description of the NVRTC status code
  std::string GetErrorString(nvrtcResult status) const {
    return std::string(nvrtcGetErrorString(status));
  }
};
// Exception returned when building a program
using CLCudaAPIBuildError = CLCudaAPINVRTCError;
// =================================================================================================
// Error occurred in CUDA driver or runtime compilation API
#define CheckError(call) CLCudaAPIError::Check(call, CLCudaAPIError::TrimCallString(#call))
#define CheckErrorNVRTC(call) CLCudaAPINVRTCError::Check(call, CLCudaAPINVRTCError::TrimCallString(#call))
// Error occurred in CUDA driver or runtime compilation API (no-exception version for destructors)
#define CheckErrorDtor(call) CLCudaAPIError::CheckDtor(call, CLCudaAPIError::TrimCallString(#call))
#define CheckErrorDtorNVRTC(call) CLCudaAPINVRTCError::CheckDtor(call, CLCudaAPINVRTCError::TrimCallString(#call))
// =================================================================================================
// C++11 RAII wrapper around a pair of 'CUevent' objects (start and end markers)
class Event {
 public:
  // Note that there is no constructor based on the regular CUDA data-type because of extra state
  // Regular constructor: creates both events, destroyed automatically with the last copy
  explicit Event():
      start_(new CUevent, [](CUevent* ev) { CheckErrorDtor(cuEventDestroy(*ev)); delete ev; }),
      end_(new CUevent, [](CUevent* ev) { CheckErrorDtor(cuEventDestroy(*ev)); delete ev; }) {
    CheckError(cuEventCreate(start_.get(), CU_EVENT_DEFAULT));
    CheckError(cuEventCreate(end_.get(), CU_EVENT_DEFAULT));
  }

  // No-op for CUDA: a cuStreamSynchronize call follows each kernel launch instead
  void WaitForCompletion() const { }

  // Milliseconds elapsed between the recorded start and end events
  // (best-effort: a failing cuEventElapsedTime call leaves the result at 0.0f)
  float GetElapsedTime() const {
    auto elapsed_ms = 0.0f;
    cuEventElapsedTime(&elapsed_ms, *start_, *end_);
    return elapsed_ms;
  }

  // Accessors to the private data-members
  const CUevent& start() const { return *start_; }
  const CUevent& end() const { return *end_; }
  Event* pointer() { return this; }
 private:
  std::shared_ptr<CUevent> start_;
  std::shared_ptr<CUevent> end_;
};
// Pointer to a CUDA event
using EventPointer = Event*;
// =================================================================================================
// Raw platform ID type
using RawPlatformID = size_t;
// The CUDA platform: initializes the CUDA driver API
class Platform {
 public:
  // Initializes the driver API. CUDA knows only a single platform, so the ID must be 0.
  explicit Platform(const size_t platform_id) : platform_id_(0) {
    if (platform_id != 0) { throw LogicError("CUDA back-end requires a platform ID of 0"); }
    CheckError(cuInit(0));
  }

  // Methods to retrieve platform information
  std::string Name() const { return "CUDA"; }
  std::string Vendor() const { return "NVIDIA Corporation"; }
  std::string Version() const {
    auto version = 0;
    CheckError(cuDriverGetVersion(&version));
    return "CUDA driver "+std::to_string(version);
  }

  // Returns the number of devices on this platform
  size_t NumDevices() const {
    auto count = 0;
    CheckError(cuDeviceGetCount(&count));
    return static_cast<size_t>(count);
  }

  // Accessor to the raw ID (always 0 for the CUDA back-end)
  const RawPlatformID& operator()() const { return platform_id_; }
 private:
  const size_t platform_id_;
};
// Retrieves a vector with all platforms; CUDA always exposes exactly one
inline std::vector<Platform> GetAllPlatforms() {
  return std::vector<Platform>{Platform(size_t{0})};
}
// =================================================================================================
// Raw device ID type
using RawDeviceID = CUdevice;
// C++11 version of 'CUdevice'
class Device {
 public:
  // Constructor based on the regular CUDA data-type
  explicit Device(const CUdevice device): device_(device) { }

  // Initialization based on a platform and a device index within that platform
  explicit Device(const Platform &platform, const size_t device_id) {
    auto num_devices = platform.NumDevices();
    if (num_devices == 0) {
      throw RuntimeError("Device: no devices found");
    }
    if (device_id >= num_devices) {
      throw RuntimeError("Device: invalid device ID "+std::to_string(device_id));
    }
    CheckError(cuDeviceGet(&device_, device_id));
  }

  // Methods to retrieve device information

  // CUDA has no platform concept: a single pseudo-platform with ID 0 is reported
  RawPlatformID PlatformID() const { return 0; }
  std::string Version() const {
    auto result = 0;
    CheckError(cuDriverGetVersion(&result));
    return "CUDA driver "+std::to_string(result);
  }
  size_t VersionNumber() const {
    auto result = 0;
    CheckError(cuDriverGetVersion(&result));
    return static_cast<size_t>(result);
  }
  std::string Vendor() const { return "NVIDIA Corporation"; }
  std::string Name() const {
    auto result = std::string{};
    result.resize(kStringLength);
    CheckError(cuDeviceGetName(&result[0], result.size(), device_));
    result.resize(strlen(result.c_str())); // Removes any trailing '\0'-characters
    return result;
  }
  std::string Type() const { return "GPU"; }
  size_t MaxWorkGroupSize() const { return GetInfo(CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK); }
  size_t MaxWorkItemDimensions() const { return size_t{3}; }
  // Maximum thread-block dimensions (x, y, z), mirroring OpenCL's work-item sizes
  std::vector<size_t> MaxWorkItemSizes() const {
    return std::vector<size_t>{GetInfo(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X),
                               GetInfo(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y),
                               GetInfo(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z)};
  }
  // Per-block shared-memory size, reported as OpenCL-style 'local memory'
  unsigned long LocalMemSize() const {
    return static_cast<unsigned long>(GetInfo(CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK));
  }
  // Compute capability formatted as 'SMx.y'
  std::string Capabilities() const {
    const auto major = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR);
    const auto minor = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR);
    return "SM"+std::to_string(major)+"."+std::to_string(minor);
  }
  // Compute capability in the 'compute_xy' form used for NVRTC's '-arch' option
  std::string ComputeArch() const {
    const auto major = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR);
    const auto minor = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR);
    return "compute_"+std::to_string(major)+std::to_string(minor);
  }
  // OpenCL-style extensions do not exist in CUDA: always reports 'not present'
  bool HasExtension(const std::string &extension) const { return false; }
  bool SupportsFP64() const { return true; }
  // FP16 support: SM 5.3, or SM 6.x and newer
  bool SupportsFP16() const {
    const auto major = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR);
    const auto minor = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR);
    if (major > 5) { return true; } // SM 6.x, 7.x and higher
    if (major == 5 && minor == 3) { return true; } // SM 5.3
    return false;
  }
  // Clock rates scale the raw attribute by 1e-3 (kHz -> MHz per the CUDA driver API docs);
  // the double result is truncated to size_t
  size_t CoreClock() const { return 1e-3*GetInfo(CU_DEVICE_ATTRIBUTE_CLOCK_RATE); }
  size_t ComputeUnits() const { return GetInfo(CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT); }
  unsigned long MemorySize() const {
    auto result = size_t{0};
    CheckError(cuDeviceTotalMem(&result, device_));
    return static_cast<unsigned long>(result);
  }
  // NOTE(review): reported as the total device memory; no separate max-alloc query is made here
  unsigned long MaxAllocSize() const { return MemorySize(); }
  size_t MemoryClock() const { return 1e-3*GetInfo(CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE); }
  size_t MemoryBusWidth() const { return GetInfo(CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH); }

  // Configuration-validity checks
  bool IsLocalMemoryValid(const size_t local_mem_usage) const {
    return (local_mem_usage <= LocalMemSize());
  }
  // Verifies a thread-block configuration against the per-dimension and total limits.
  // NOTE(review): the dimension-count check runs after the per-dimension loop, so a 'local'
  // with more than 3 entries would index past MaxWorkItemSizes() - confirm callers never pass
  // more than 3 dimensions.
  bool IsThreadConfigValid(const std::vector<size_t> &local) const {
    auto local_size = size_t{1};
    for (const auto &item: local) { local_size *= item; }
    for (auto i=size_t{0}; i<local.size(); ++i) {
      if (local[i] > MaxWorkItemSizes()[i]) { return false; }
    }
    if (local_size > MaxWorkGroupSize()) { return false; }
    if (local.size() > MaxWorkItemDimensions()) { return false; }
    return true;
  }

  // Query for a specific type of device or brand: fixed answers for the CUDA back-end
  bool IsCPU() const { return false; }
  bool IsGPU() const { return true; }
  bool IsAMD() const { return false; }
  bool IsNVIDIA() const { return true; }
  bool IsIntel() const { return false; }
  bool IsARM() const { return false; }

  // Platform specific extensions
  std::string AMDBoardName() const { return ""; }
  std::string NVIDIAComputeCapability() const { return Capabilities(); }

  // Accessor to the private data-member
  const RawDeviceID& operator()() const { return device_; }
 private:
  CUdevice device_;

  // Private helper function: retrieves a single integer device attribute
  size_t GetInfo(const CUdevice_attribute info) const {
    auto result = 0;
    CheckError(cuDeviceGetAttribute(&result, info, device_));
    return static_cast<size_t>(result);
  }
};
// =================================================================================================
// Raw context type
using RawContext = CUcontext;
// C++11 version of 'CUcontext'
class Context {
 public:
  // Wraps an externally-owned context: no CUDA clean-up is performed on destruction
  explicit Context(const CUcontext context):
      context_(new CUcontext) {
    *context_ = context;
  }

  // Creates and owns a fresh context on the given device, destroyed with the last copy
  explicit Context(const Device &device):
      context_(new CUcontext, [](CUcontext* ctx) {
        if (*ctx) { CheckErrorDtor(cuCtxDestroy(*ctx)); }
        delete ctx;
      }) {
    CheckError(cuCtxCreate(context_.get(), 0, device()));
  }

  // Accessors to the private data-member
  const RawContext& operator()() const { return *context_; }
  RawContext* pointer() const { return &(*context_); }
 private:
  std::shared_ptr<CUcontext> context_;
};
// Pointer to a raw CUDA context
using ContextPointer = CUcontext*;
// =================================================================================================
// C++11 version of 'nvrtcProgram'. Additionally holds the program's source code.
class Program {
 public:
  Program() = default;
  // Note that there is no constructor based on the regular CUDA data-type because of extra state

  // Source-based constructor with memory management
  explicit Program(const Context &, std::string source):
      program_(new nvrtcProgram, [](nvrtcProgram* p) {
        if (*p) { CheckErrorDtorNVRTC(nvrtcDestroyProgram(p)); }
        delete p;
      }),
      source_(std::move(source)),
      from_binary_(false) {
    const auto source_ptr = &source_[0];
    CheckErrorNVRTC(nvrtcCreateProgram(program_.get(), source_ptr, nullptr, 0, nullptr, nullptr));
  }

  // PTX-based constructor: 'binary' holds previously compiled PTX, no NVRTC program is created
  explicit Program(const Device &device, const Context &context, const std::string &binary):
      program_(nullptr), // not used
      source_(binary),
      from_binary_(true) {
  }

  // Compiles the device program (when starting from source) and loads it as a CUDA module.
  // Appends the '-arch' option for the given device to 'options' in both cases.
  void Build(const Device &device, std::vector<std::string> &options) {
    options.push_back("-arch=" + device.ComputeArch());
    if (!from_binary_) {
      auto raw_options = std::vector<const char*>();
      for (const auto &option: options) {
        raw_options.push_back(option.c_str());
      }
      auto status = nvrtcCompileProgram(*program_, raw_options.size(), raw_options.data());
      CLCudaAPINVRTCError::Check(status, "nvrtcCompileProgram");
    }
    // Fix: the module is now loaded for PTX-based programs as well. Previously this method
    // returned early when 'from_binary_' was set, leaving 'module_' uninitialized and breaking
    // kernel retrieval through GetModule() for programs constructed from a cached binary.
    CheckError(cuModuleLoadDataEx(&module_, GetIR().data(), 0, nullptr, nullptr));
    // NOTE(review): 'module_' is never unloaded (no cuModuleUnload); confirm whether tying the
    // module lifetime to the CUDA context is intentional
  }

  // Confirms whether a certain status code is an actual compilation error or warning
  bool StatusIsCompilationWarningOrError(const nvrtcResult status) const {
    return (status == NVRTC_ERROR_COMPILATION);
  }

  // Retrieves the warning/error message from the compiler (if any)
  std::string GetBuildInfo(const Device &) const {
    if (from_binary_) { return std::string{}; }
    auto bytes = size_t{0};
    CheckErrorNVRTC(nvrtcGetProgramLogSize(*program_, &bytes));
    auto result = std::string{};
    result.resize(bytes);
    CheckErrorNVRTC(nvrtcGetProgramLog(*program_, &result[0]));
    return result;
  }

  // Retrieves an intermediate representation of the compiled program (i.e. PTX)
  std::string GetIR() const {
    if (from_binary_) { return source_; } // holds the PTX
    auto bytes = size_t{0};
    CheckErrorNVRTC(nvrtcGetPTXSize(*program_, &bytes));
    auto result = std::string{};
    result.resize(bytes);
    CheckErrorNVRTC(nvrtcGetPTX(*program_, &result[0]));
    return result;
  }

  // Accessors to the private data-members
  const CUmodule GetModule() const { return module_; }
  const nvrtcProgram& operator()() const { return *program_; }
 private:
  std::shared_ptr<nvrtcProgram> program_;
  CUmodule module_ = nullptr; // only valid after a successful Build()
  std::string source_;        // CUDA-C source, or PTX when 'from_binary_' is set
  bool from_binary_;
};
// =================================================================================================
// Raw command-queue type
using RawCommandQueue = CUstream;
// C++11 version of 'CUstream'
class Queue {
 public:
  // Note that there is no constructor based on the regular CUDA data-type because of extra state
  // Creates a new non-blocking stream, destroyed automatically with the last copy
  explicit Queue(const Context &context, const Device &device):
      queue_(new CUstream, [](CUstream* stream) {
        if (*stream) { CheckErrorDtor(cuStreamDestroy(*stream)); }
        delete stream;
      }),
      context_(context),
      device_(device) {
    CheckError(cuStreamCreate(queue_.get(), CU_STREAM_NON_BLOCKING));
  }

  // Synchronizes the stream; the event-based overload also waits for that event first
  void Finish(Event &event) const {
    CheckError(cuEventSynchronize(event.end()));
    Finish();
  }
  void Finish() const {
    CheckError(cuStreamSynchronize(*queue_));
  }

  // Retrieves the corresponding context or device
  Context GetContext() const { return context_; }
  Device GetDevice() const { return device_; }

  // Accessor to the private data-member
  const RawCommandQueue& operator()() const { return *queue_; }
 private:
  std::shared_ptr<CUstream> queue_;
  const Context context_;
  const Device device_;
};
// =================================================================================================
// C++11 version of page-locked (pinned) host memory
template <typename T>
class BufferHost {
 public:
  // Allocates pinned host memory, freed automatically when the last copy goes away.
  // Fix: the deleter now uses the non-throwing CheckErrorDtor; throwing from a shared_ptr
  // deleter (possibly during stack unwinding) would call std::terminate.
  explicit BufferHost(const Context &, const size_t size):
      buffer_(new void*, [](void** m) { CheckErrorDtor(cuMemFreeHost(*m)); delete m; }),
      size_(size) {
    CheckError(cuMemAllocHost(buffer_.get(), size*sizeof(T)));
  }

  // Retrieves the actual allocated size in bytes
  size_t GetSize() const {
    return size_*sizeof(T);
  }

  // Compatibility with std::vector
  size_t size() const { return size_; }
  T* begin() { return static_cast<T*>(*buffer_); }
  // Fix: 'end' now points one past the last element (was '&buffer[size_-1]', which made
  // iteration skip the final element and was undefined for an empty buffer)
  T* end() { return static_cast<T*>(*buffer_) + size_; }
  T& operator[](const size_t i) { return static_cast<T*>(*buffer_)[i]; }
  T* data() { return static_cast<T*>(*buffer_); }
  const T* data() const { return static_cast<T*>(*buffer_); }
 private:
  std::shared_ptr<void*> buffer_;
  const size_t size_; // number of elements of type T
};
// =================================================================================================
// Enumeration of buffer access types
enum class BufferAccess { kReadOnly, kWriteOnly, kReadWrite, kNotOwned };
// C++11 version of 'CUdeviceptr'
template <typename T>
class Buffer {
public:
// Constructor based on the regular CUDA data-type: memory management is handled elsewhere
explicit Buffer(const CUdeviceptr buffer):
buffer_(new CUdeviceptr),
access_(BufferAccess::kNotOwned) {
*buffer_ = buffer;
}
// Regular constructor with memory management. If this class does not own the buffer object, then
// the memory will not be freed automatically afterwards.
explicit Buffer(const Context &, const BufferAccess access, const size_t size):
buffer_(new CUdeviceptr, [access](CUdeviceptr* m) {
if (access != BufferAccess::kNotOwned) { CheckError(cuMemFree(*m)); }
delete m;
}),
access_(access) {
CheckError(cuMemAlloc(buffer_.get(), size*sizeof(T)));
}
// As above, but now with read/write access as a default
explicit Buffer(const Context &context, const size_t size):
Buffer<T>(context, BufferAccess::kReadWrite, size) {
}
// Constructs a new buffer based on an existing host-container
template <typename Iterator>
explicit Buffer(const Context &context, const Queue &queue, Iterator start, Iterator end):
Buffer(context, BufferAccess::kReadWrite, static_cast<size_t>(end - start)) {
auto size = static_cast<size_t>(end - start);
auto pointer = &*start;
CheckError(cuMemcpyHtoDAsync(*buffer_, pointer, size*sizeof(T), queue()));
queue.Finish();
}
// Copies from device to host: reading the device buffer a-synchronously
void ReadAsync(const Queue &queue, const size_t size, T* host, const size_t offset = 0) const {
if (access_ == BufferAccess::kWriteOnly) {
throw LogicError("Buffer: reading from a write-only buffer");
}
CheckError(cuMemcpyDtoHAsync(host, *buffer_ + offset*sizeof(T), size*sizeof(T), queue()));
}
void ReadAsync(const Queue &queue, const size_t size, std::vector<T> &host,
const size_t offset = 0) const {
if (host.size() < size) {
throw LogicError("Buffer: target host buffer is too small");
}
ReadAsync(queue, size, host.data(), offset);
}
void ReadAsync(const Queue &queue, const size_t size, BufferHost<T> &host,
const size_t offset = 0) const {
if (host.size() < size) {
throw LogicError("Buffer: target host buffer is too small");
}
ReadAsync(queue, size, host.data(), offset);
}
// Copies from device to host: reading the device buffer
void Read(const Queue &queue, const size_t size, T* host, const size_t offset = 0) const {
ReadAsync(queue, size, host, offset);
queue.Finish();
}
void Read(const Queue &queue, const size_t size, std::vector<T> &host,
const size_t offset = 0) const {
Read(queue, size, host.data(), offset);
}
void Read(const Queue &queue, const size_t size, BufferHost<T> &host,
const size_t offset = 0) const {
Read(queue, size, host.data(), offset);
}
// Copies from host to device: writing the device buffer a-synchronously
void WriteAsync(const Queue &queue, const size_t size, const T* host, const size_t offset = 0) {
if (access_ == BufferAccess::kReadOnly) {
throw LogicError("Buffer: writing to a read-only buffer");
}
if (GetSize() < (offset+size)*sizeof(T)) {
throw LogicError("Buffer: target device buffer is too small");
}
CheckError(cuMemcpyHtoDAsync(*buffer_ + offset*sizeof(T), host, size*sizeof(T), queue()));
}
void WriteAsync(const Queue &queue, const size_t size, const std::vector<T> &host,
const size_t offset = 0) {
WriteAsync(queue, size, host.data(), offset);
}
void WriteAsync(const Queue &queue, const size_t size, const BufferHost<T> &host,
const size_t offset = 0) {
WriteAsync(queue, size, host.data(), offset);
}
// Copies from host to device: writing the device buffer
void Write(const Queue &queue, const size_t size, const T* host, const size_t offset = 0) {
WriteAsync(queue, size, host, offset);
queue.Finish();
}
void Write(const Queue &queue, const size_t size, const std::vector<T> &host,
const size_t offset = 0) {
Write(queue, size, host.data(), offset);
}
void Write(const Queue &queue, const size_t size, const BufferHost<T> &host,
const size_t offset = 0) {
Write(queue, size, host.data(), offset);
}
// Copies the contents of this buffer into another device buffer
void CopyToAsync(const Queue &queue, const size_t size, const Buffer<T> &destination) const {
CheckError(cuMemcpyDtoDAsync(destination(), *buffer_, size*sizeof(T), queue()));
}
void CopyTo(const Queue &queue, const size_t size, const Buffer<T> &destination) const {
CopyToAsync(queue, size, destination);
queue.Finish();
}
// Retrieves the actual allocated size in bytes, by querying the CUDA driver for the extent of
// the allocation that backs this device pointer.
size_t GetSize() const {
  auto num_bytes = size_t{0};
  CheckError(cuMemGetAddressRange(nullptr, &num_bytes, *buffer_));
  return num_bytes;
}
// Accessors to the private data-members
CUdeviceptr operator()() const { return *buffer_; }  // read-only handle to the device pointer
CUdeviceptr& operator()() { return *buffer_; }       // mutable handle to the device pointer
private:
// Device pointer held through a shared_ptr so that Buffer copies alias the same allocation
// (the deleter is set at construction -- not visible in this part of the file)
std::shared_ptr<CUdeviceptr> buffer_;
// Access policy; kReadOnly makes the Write/WriteAsync methods throw
const BufferAccess access_;
};
// =================================================================================================
// C++11 version of 'CUfunction': wraps a CUDA kernel handle with argument storage and launching
class Kernel {
 public:

  // Constructor based on the regular CUDA data-type: memory management is handled elsewhere
  explicit Kernel(const CUfunction kernel):
      name_("unknown"),
      kernel_(kernel) {
  }

  // Regular constructor with memory management: looks up the function in a compiled module
  explicit Kernel(const Program &program, const std::string &name): name_(name) {
    CheckError(cuModuleGetFunction(&kernel_, program.GetModule(), name.c_str()));
  }

  // Sets a kernel argument at the indicated position. This stores both the value of the argument
  // (as raw bytes) and the index indicating where this value can be found. Note that setting the
  // same index twice leaves the previous bytes in 'arguments_data_' (harmless, but not reclaimed).
  template <typename T>
  void SetArgument(const size_t index, const T &value) {
    if (index >= arguments_indices_.size()) { arguments_indices_.resize(index+1); }
    arguments_indices_[index] = arguments_data_.size();
    for (auto j=size_t(0); j<sizeof(T); ++j) {
      arguments_data_.push_back(reinterpret_cast<const char*>(&value)[j]);
    }
  }
  template <typename T>
  void SetArgument(const size_t index, Buffer<T> &value) {
    SetArgument(index, value());  // stores the raw CUdeviceptr value
  }

  // Sets all arguments in one go using parameter packs. Note that this resets all previously set
  // arguments using 'SetArgument' or 'SetArguments'. Also compiles for an empty argument list.
  template <typename... Args>
  void SetArguments(Args&... args) {
    arguments_indices_.clear();
    arguments_data_.clear();
    SetArgumentsRecursive(0, args...);
  }

  // Retrieves the amount of local memory used per work-group for this kernel. Note that this is
  // the shared memory in CUDA terminology.
  unsigned long LocalMemUsage(const Device &) const {
    auto result = 0;  // plain int: that is what cuFuncGetAttribute expects
    CheckError(cuFuncGetAttribute(&result, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, kernel_));
    return static_cast<unsigned long>(result);
  }

  // Retrieves the name of the kernel
  std::string GetFunctionName() const {
    return name_;
  }

  // Launches a kernel onto the specified queue. 'global' and 'local' follow OpenCL conventions:
  // the grid becomes global[i]/local[i] threadblocks of local[i] threads each (max 3 dimensions).
  void Launch(const Queue &queue, const std::vector<size_t> &global,
              const std::vector<size_t> &local, EventPointer event) {
    // TODO: Currently this CUDA launch is always synchronous due to a cuStreamSynchronize call
    if (local.size() == 0) {
      throw LogicError("Kernel: launching with a default workgroup size is not implemented for the CUDA back-end");
    }
    // Creates the grid (number of threadblocks) and sets the block sizes (threads per block).
    // More than 3 dimensions would index past the fixed-size vectors below, hence the bound check.
    auto grid = std::vector<size_t>{1, 1, 1};
    auto block = std::vector<size_t>{1, 1, 1};
    if (global.size() != local.size() || local.size() > 3) {
      throw LogicError("invalid thread/workgroup dimensions");
    }
    for (auto i=size_t{0}; i<local.size(); ++i) { grid[i] = global[i]/local[i]; }
    for (auto i=size_t{0}; i<local.size(); ++i) { block[i] = local[i]; }
    // Creates the array of pointers from the arrays of indices & data
    std::vector<void*> pointers;
    for (auto &index: arguments_indices_) {
      pointers.push_back(&arguments_data_[index]);
    }
    // Launches the kernel, its execution time is recorded by events
    if (event) { CheckError(cuEventRecord(event->start(), queue())); }
    CheckError(cuLaunchKernel(kernel_, grid[0], grid[1], grid[2], block[0], block[1], block[2],
                              0, queue(), pointers.data(), nullptr));
    CheckError(cuStreamSynchronize(queue()));  // checked: launch failures surface here, not silently
    if (event) { CheckError(cuEventRecord(event->end(), queue())); }
  }

  // As above, but with an event waiting list
  void Launch(const Queue &queue, const std::vector<size_t> &global,
              const std::vector<size_t> &local, EventPointer event,
              const std::vector<Event>& waitForEvents) {
    for (auto &waitEvent : waitForEvents) {
      waitEvent.WaitForCompletion(); // note: doesn't do anything, every kernel call is synchronous
    }
    return Launch(queue, global, local, event);
  }

  // Accessors to the private data-members
  const CUfunction& operator()() const { return kernel_; }
  CUfunction operator()() { return kernel_; }

 private:
  const std::string name_;                 // kernel name ("unknown" for the handle-only constructor)
  CUfunction kernel_;                      // the underlying CUDA function handle
  std::vector<size_t> arguments_indices_;  // per-argument byte offsets into 'arguments_data_'
  std::vector<char> arguments_data_;       // the arguments data as raw bytes

  // Internal implementation for the recursive SetArguments function
  void SetArgumentsRecursive(const size_t) { }  // base case: no (remaining) arguments
  template <typename T>
  void SetArgumentsRecursive(const size_t index, T &first) {
    SetArgument(index, first);
  }
  template <typename T, typename... Args>
  void SetArgumentsRecursive(const size_t index, T &first, Args&... args) {
    SetArgument(index, first);
    SetArgumentsRecursive(index+1, args...);
  }
};
// =================================================================================================
} // namespace clblast
// CLBLAST_CUPP11_H_
#endif

View File

@ -15,6 +15,7 @@
#ifndef CLBLAST_CXPP11_COMMON_H_
#define CLBLAST_CXPP11_COMMON_H_
#include <cstring> // strchr
#include <string> // std::string
#include <stdexcept> // std::runtime_error

View File

@ -24,14 +24,16 @@ R"(
// =================================================================================================
// Enable support for half-precision
#if PRECISION == 16
#pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
#ifndef CUDA
// Enable support for half-precision
#if PRECISION == 16
#pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
// Enable support for double-precision
#if PRECISION == 64 || PRECISION == 6464
#pragma OPENCL EXTENSION cl_khr_fp64: enable
// Enable support for double-precision
#if PRECISION == 64 || PRECISION == 6464
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
#endif
// Half-precision
@ -117,10 +119,15 @@ R"(
#define GetRealArg(x) x
#endif
// Pointers to local memory objects (using a define because CUDA doesn't need them)
#ifndef LOCAL_PTR
#define LOCAL_PTR __local
#endif
// =================================================================================================
// Don't use the non-IEEE754 compliant OpenCL built-in mad() instruction per default. For specific
// devices, this is enabled (see src/routine.cc).
// devices, this is enabled (see src/routine.cpp).
#ifndef USE_CL_MAD
#define USE_CL_MAD 0
#endif
@ -254,18 +261,18 @@ R"(
// http://docs.nvidia.com/cuda/samples/6_Advanced/transpose/doc/MatrixTranspose.pdf
// More details: https://github.com/CNugteren/CLBlast/issues/53
#if USE_STAGGERED_INDICES == 1
INLINE_FUNC size_t GetGroupIDFlat() {
INLINE_FUNC int GetGroupIDFlat() {
return get_group_id(0) + get_num_groups(0) * get_group_id(1);
}
INLINE_FUNC size_t GetGroupID1() {
INLINE_FUNC int GetGroupID1() {
return (GetGroupIDFlat()) % get_num_groups(1);
}
INLINE_FUNC size_t GetGroupID0() {
INLINE_FUNC int GetGroupID0() {
return ((GetGroupIDFlat() / get_num_groups(1)) + GetGroupID1()) % get_num_groups(0);
}
#else
INLINE_FUNC size_t GetGroupID1() { return get_group_id(1); }
INLINE_FUNC size_t GetGroupID0() { return get_group_id(0); }
INLINE_FUNC int GetGroupID1() { return get_group_id(1); }
INLINE_FUNC int GetGroupID0() { return get_group_id(0); }
#endif
// =================================================================================================

View File

@ -34,7 +34,7 @@ R"(
// Returns an element from a vector
INLINE_FUNC real LoadVector(const int id, const int max,
__global real* gm, const int offset, const int inc,
const __global real* gm, const int offset, const int inc,
const int do_conjugate) {
if (id < max) {
real result = gm[id*inc + offset];

View File

@ -164,7 +164,7 @@ void InvertDiagonalBlock(int n, __global const real* restrict src, const int src
// =================================================================================================
// Triple matrix-multiplication kernel: C = A * B
INLINE_FUNC void TripleMatMul(const int size, const bool upper, const int part, __local real* blm, int n,
INLINE_FUNC void TripleMatMul(const int size, const bool upper, const int part, LOCAL_PTR real* blm, int n,
__global const real* agm, __global const real* bgm, __global real* cgm,
const int lda, const int ldb, const int ldc,
int current_size, int num_pages, const int block_size) {
@ -250,7 +250,7 @@ INLINE_FUNC void TripleMatMul(const int size, const bool upper, const int part,
// =================================================================================================
// Triple matrix-multiplication kernel part 1: B12 = A12 * B22 (upper) or B21 = A21 * B11 (lower)
INLINE_FUNC void TripleMatMulPart1(const int size, const bool upper, __local real* blm, int n,
INLINE_FUNC void TripleMatMulPart1(const int size, const bool upper, LOCAL_PTR real* blm, int n,
__global const real* src, const int a_offset, const int lda,
__global real* dest, int current_size, int num_pages, const int block_size) {
@ -286,7 +286,7 @@ INLINE_FUNC void TripleMatMulPart1(const int size, const bool upper, __local rea
}
// Triple matrix-multiplication kernel part 1: B12 = -B11 * B12 (upper) or B21 = -B22 * B21 (lower)
INLINE_FUNC void TripleMatMulPart2(const int size, const bool upper, __local real* blm, const int n,
INLINE_FUNC void TripleMatMulPart2(const int size, const bool upper, LOCAL_PTR real* blm, const int n,
__global real* dest, int current_size, int num_pages, const int block_size) {
// Emulates a 3D grid: NX * (NY * num_pages)

View File

@ -84,39 +84,39 @@ void TransposeMatrixFast(const int ld,
#if TRA_WPT == 1
results[0] = v[0];
#elif TRA_WPT == 2
results[0] = (realT) {v[0].x, v[1].x};
results[1] = (realT) {v[0].y, v[1].y};
results[0].x = v[0].x; results[0].y = v[1].x;
results[1].x = v[0].y; results[1].y = v[1].y;
#elif TRA_WPT == 4
results[0] = (realT) {v[0].x, v[1].x, v[2].x, v[3].x};
results[1] = (realT) {v[0].y, v[1].y, v[2].y, v[3].y};
results[2] = (realT) {v[0].z, v[1].z, v[2].z, v[3].z};
results[3] = (realT) {v[0].w, v[1].w, v[2].w, v[3].w};
results[0].x = v[0].x; results[0].y = v[1].x; results[0].z = v[2].x; results[0].w = v[3].x;
results[1].x = v[0].y; results[1].y = v[1].y; results[1].z = v[2].y; results[1].w = v[3].y;
results[2].x = v[0].z; results[2].y = v[1].z; results[2].z = v[2].z; results[2].w = v[3].z;
results[3].x = v[0].w; results[3].y = v[1].w; results[3].z = v[2].w; results[3].w = v[3].w;
#elif TRA_WPT == 8
results[0] = (realT) {v[0].s0, v[1].s0, v[2].s0, v[3].s0, v[4].s0, v[5].s0, v[6].s0, v[7].s0};
results[1] = (realT) {v[0].s1, v[1].s1, v[2].s1, v[3].s1, v[4].s1, v[5].s1, v[6].s1, v[7].s1};
results[2] = (realT) {v[0].s2, v[1].s2, v[2].s2, v[3].s2, v[4].s2, v[5].s2, v[6].s2, v[7].s2};
results[3] = (realT) {v[0].s3, v[1].s3, v[2].s3, v[3].s3, v[4].s3, v[5].s3, v[6].s3, v[7].s3};
results[4] = (realT) {v[0].s4, v[1].s4, v[2].s4, v[3].s4, v[4].s4, v[5].s4, v[6].s4, v[7].s4};
results[5] = (realT) {v[0].s5, v[1].s5, v[2].s5, v[3].s5, v[4].s5, v[5].s5, v[6].s5, v[7].s5};
results[6] = (realT) {v[0].s6, v[1].s6, v[2].s6, v[3].s6, v[4].s6, v[5].s6, v[6].s6, v[7].s6};
results[7] = (realT) {v[0].s7, v[1].s7, v[2].s7, v[3].s7, v[4].s7, v[5].s7, v[6].s7, v[7].s7};
results[0].s0 = v[0].s0; results[0].s1 = v[1].s0; results[0].s2 = v[2].s0; results[0].s3 = v[3].s0; results[0].s4 = v[4].s0; results[0].s5 = v[5].s0; results[0].s6 = v[6].s0; results[0].s7 = v[7].s0;
results[1].s0 = v[0].s1; results[1].s1 = v[1].s1; results[1].s2 = v[2].s1; results[1].s3 = v[3].s1; results[1].s4 = v[4].s1; results[1].s5 = v[5].s1; results[1].s6 = v[6].s1; results[1].s7 = v[7].s1;
results[2].s0 = v[0].s2; results[2].s1 = v[1].s2; results[2].s2 = v[2].s2; results[2].s3 = v[3].s2; results[2].s4 = v[4].s2; results[2].s5 = v[5].s2; results[2].s6 = v[6].s2; results[2].s7 = v[7].s2;
results[3].s0 = v[0].s3; results[3].s1 = v[1].s3; results[3].s2 = v[2].s3; results[3].s3 = v[3].s3; results[3].s4 = v[4].s3; results[3].s5 = v[5].s3; results[3].s6 = v[6].s3; results[3].s7 = v[7].s3;
results[4].s0 = v[0].s4; results[4].s1 = v[1].s4; results[4].s2 = v[2].s4; results[4].s3 = v[3].s4; results[4].s4 = v[4].s4; results[4].s5 = v[5].s4; results[4].s6 = v[6].s4; results[4].s7 = v[7].s4;
results[5].s0 = v[0].s5; results[5].s1 = v[1].s5; results[5].s2 = v[2].s5; results[5].s3 = v[3].s5; results[5].s4 = v[4].s5; results[5].s5 = v[5].s5; results[5].s6 = v[6].s5; results[5].s7 = v[7].s5;
results[6].s0 = v[0].s6; results[6].s1 = v[1].s6; results[6].s2 = v[2].s6; results[6].s3 = v[3].s6; results[6].s4 = v[4].s6; results[6].s5 = v[5].s6; results[6].s6 = v[6].s6; results[6].s7 = v[7].s6;
results[7].s0 = v[0].s7; results[7].s1 = v[1].s7; results[7].s2 = v[2].s7; results[7].s3 = v[3].s7; results[7].s4 = v[4].s7; results[7].s5 = v[5].s7; results[7].s6 = v[6].s7; results[7].s7 = v[7].s7;
#elif TRA_WPT == 16
results[ 0] = (realT) {v[0].s0, v[1].s0, v[2].s0, v[3].s0, v[4].s0, v[5].s0, v[6].s0, v[7].s0, v[8].s0, v[9].s0, v[10].s0, v[11].s0, v[12].s0, v[13].s0, v[14].s0, v[15].s0};
results[ 1] = (realT) {v[0].s1, v[1].s1, v[2].s1, v[3].s1, v[4].s1, v[5].s1, v[6].s1, v[7].s1, v[8].s1, v[9].s1, v[10].s1, v[11].s1, v[12].s1, v[13].s1, v[14].s1, v[15].s1};
results[ 2] = (realT) {v[0].s2, v[1].s2, v[2].s2, v[3].s2, v[4].s2, v[5].s2, v[6].s2, v[7].s2, v[8].s2, v[9].s2, v[10].s2, v[11].s2, v[12].s2, v[13].s2, v[14].s2, v[15].s2};
results[ 3] = (realT) {v[0].s3, v[1].s3, v[2].s3, v[3].s3, v[4].s3, v[5].s3, v[6].s3, v[7].s3, v[8].s3, v[9].s3, v[10].s3, v[11].s3, v[12].s3, v[13].s3, v[14].s3, v[15].s3};
results[ 4] = (realT) {v[0].s4, v[1].s4, v[2].s4, v[3].s4, v[4].s4, v[5].s4, v[6].s4, v[7].s4, v[8].s4, v[9].s4, v[10].s4, v[11].s4, v[12].s4, v[13].s4, v[14].s4, v[15].s4};
results[ 5] = (realT) {v[0].s5, v[1].s5, v[2].s5, v[3].s5, v[4].s5, v[5].s5, v[6].s5, v[7].s5, v[8].s5, v[9].s5, v[10].s5, v[11].s5, v[12].s5, v[13].s5, v[14].s5, v[15].s5};
results[ 6] = (realT) {v[0].s6, v[1].s6, v[2].s6, v[3].s6, v[4].s6, v[5].s6, v[6].s6, v[7].s6, v[8].s6, v[9].s6, v[10].s6, v[11].s6, v[12].s6, v[13].s6, v[14].s6, v[15].s6};
results[ 7] = (realT) {v[0].s7, v[1].s7, v[2].s7, v[3].s7, v[4].s7, v[5].s7, v[6].s7, v[7].s7, v[8].s7, v[9].s7, v[10].s7, v[11].s7, v[12].s7, v[13].s7, v[14].s7, v[15].s7};
results[ 8] = (realT) {v[0].s8, v[1].s8, v[2].s8, v[3].s8, v[4].s8, v[5].s8, v[6].s8, v[7].s8, v[8].s8, v[9].s8, v[10].s8, v[11].s8, v[12].s8, v[13].s8, v[14].s8, v[15].s8};
results[ 9] = (realT) {v[0].s9, v[1].s9, v[2].s9, v[3].s9, v[4].s9, v[5].s9, v[6].s9, v[7].s9, v[8].s9, v[9].s9, v[10].s9, v[11].s9, v[12].s9, v[13].s9, v[14].s9, v[15].s9};
results[10] = (realT) {v[0].sA, v[1].sA, v[2].sA, v[3].sA, v[4].sA, v[5].sA, v[6].sA, v[7].sA, v[8].sA, v[9].sA, v[10].sA, v[11].sA, v[12].sA, v[13].sA, v[14].sA, v[15].sA};
results[11] = (realT) {v[0].sB, v[1].sB, v[2].sB, v[3].sB, v[4].sB, v[5].sB, v[6].sB, v[7].sB, v[8].sB, v[9].sB, v[10].sB, v[11].sB, v[12].sB, v[13].sB, v[14].sB, v[15].sB};
results[12] = (realT) {v[0].sC, v[1].sC, v[2].sC, v[3].sC, v[4].sC, v[5].sC, v[6].sC, v[7].sC, v[8].sC, v[9].sC, v[10].sC, v[11].sC, v[12].sC, v[13].sC, v[14].sC, v[15].sC};
results[13] = (realT) {v[0].sD, v[1].sD, v[2].sD, v[3].sD, v[4].sD, v[5].sD, v[6].sD, v[7].sD, v[8].sD, v[9].sD, v[10].sD, v[11].sD, v[12].sD, v[13].sD, v[14].sD, v[15].sD};
results[14] = (realT) {v[0].sE, v[1].sE, v[2].sE, v[3].sE, v[4].sE, v[5].sE, v[6].sE, v[7].sE, v[8].sE, v[9].sE, v[10].sE, v[11].sE, v[12].sE, v[13].sE, v[14].sE, v[15].sE};
results[15] = (realT) {v[0].sF, v[1].sF, v[2].sF, v[3].sF, v[4].sF, v[5].sF, v[6].sF, v[7].sF, v[8].sF, v[9].sF, v[10].sF, v[11].sF, v[12].sF, v[13].sF, v[14].sF, v[15].sF};
results[ 0].s0 = v[0].s0; results[ 0].s1 = v[1].s0; results[ 0].s2 = v[2].s0; results[ 0].s3 = v[3].s0; results[ 0].s4 = v[4].s0; results[ 0].s5 = v[5].s0; results[ 0].s6 = v[6].s0; results[ 0].s7 = v[7].s0; results[ 0].s8 = v[8].s0; results[ 0].s9 = v[9].s0; results[ 0].sA = v[10].s0; results[ 0].sB = v[11].s0; results[ 0].sC = v[12].s0; results[ 0].sD = v[13].s0; results[ 0].sE = v[14].s0; results[ 0].sF = v[15].s0;
results[ 1].s0 = v[0].s1; results[ 1].s1 = v[1].s1; results[ 1].s2 = v[2].s1; results[ 1].s3 = v[3].s1; results[ 1].s4 = v[4].s1; results[ 1].s5 = v[5].s1; results[ 1].s6 = v[6].s1; results[ 1].s7 = v[7].s1; results[ 1].s8 = v[8].s1; results[ 1].s9 = v[9].s1; results[ 1].sA = v[10].s1; results[ 1].sB = v[11].s1; results[ 1].sC = v[12].s1; results[ 1].sD = v[13].s1; results[ 1].sE = v[14].s1; results[ 1].sF = v[15].s1;
results[ 2].s0 = v[0].s2; results[ 2].s1 = v[1].s2; results[ 2].s2 = v[2].s2; results[ 2].s3 = v[3].s2; results[ 2].s4 = v[4].s2; results[ 2].s5 = v[5].s2; results[ 2].s6 = v[6].s2; results[ 2].s7 = v[7].s2; results[ 2].s8 = v[8].s2; results[ 2].s9 = v[9].s2; results[ 2].sA = v[10].s2; results[ 2].sB = v[11].s2; results[ 2].sC = v[12].s2; results[ 2].sD = v[13].s2; results[ 2].sE = v[14].s2; results[ 2].sF = v[15].s2;
results[ 3].s0 = v[0].s3; results[ 3].s1 = v[1].s3; results[ 3].s2 = v[2].s3; results[ 3].s3 = v[3].s3; results[ 3].s4 = v[4].s3; results[ 3].s5 = v[5].s3; results[ 3].s6 = v[6].s3; results[ 3].s7 = v[7].s3; results[ 3].s8 = v[8].s3; results[ 3].s9 = v[9].s3; results[ 3].sA = v[10].s3; results[ 3].sB = v[11].s3; results[ 3].sC = v[12].s3; results[ 3].sD = v[13].s3; results[ 3].sE = v[14].s3; results[ 3].sF = v[15].s3;
results[ 4].s0 = v[0].s4; results[ 4].s1 = v[1].s4; results[ 4].s2 = v[2].s4; results[ 4].s3 = v[3].s4; results[ 4].s4 = v[4].s4; results[ 4].s5 = v[5].s4; results[ 4].s6 = v[6].s4; results[ 4].s7 = v[7].s4; results[ 4].s8 = v[8].s4; results[ 4].s9 = v[9].s4; results[ 4].sA = v[10].s4; results[ 4].sB = v[11].s4; results[ 4].sC = v[12].s4; results[ 4].sD = v[13].s4; results[ 4].sE = v[14].s4; results[ 4].sF = v[15].s4;
results[ 5].s0 = v[0].s5; results[ 5].s1 = v[1].s5; results[ 5].s2 = v[2].s5; results[ 5].s3 = v[3].s5; results[ 5].s4 = v[4].s5; results[ 5].s5 = v[5].s5; results[ 5].s6 = v[6].s5; results[ 5].s7 = v[7].s5; results[ 5].s8 = v[8].s5; results[ 5].s9 = v[9].s5; results[ 5].sA = v[10].s5; results[ 5].sB = v[11].s5; results[ 5].sC = v[12].s5; results[ 5].sD = v[13].s5; results[ 5].sE = v[14].s5; results[ 5].sF = v[15].s5;
results[ 6].s0 = v[0].s6; results[ 6].s1 = v[1].s6; results[ 6].s2 = v[2].s6; results[ 6].s3 = v[3].s6; results[ 6].s4 = v[4].s6; results[ 6].s5 = v[5].s6; results[ 6].s6 = v[6].s6; results[ 6].s7 = v[7].s6; results[ 6].s8 = v[8].s6; results[ 6].s9 = v[9].s6; results[ 6].sA = v[10].s6; results[ 6].sB = v[11].s6; results[ 6].sC = v[12].s6; results[ 6].sD = v[13].s6; results[ 6].sE = v[14].s6; results[ 6].sF = v[15].s6;
results[ 7].s0 = v[0].s7; results[ 7].s1 = v[1].s7; results[ 7].s2 = v[2].s7; results[ 7].s3 = v[3].s7; results[ 7].s4 = v[4].s7; results[ 7].s5 = v[5].s7; results[ 7].s6 = v[6].s7; results[ 7].s7 = v[7].s7; results[ 7].s8 = v[8].s7; results[ 7].s9 = v[9].s7; results[ 7].sA = v[10].s7; results[ 7].sB = v[11].s7; results[ 7].sC = v[12].s7; results[ 7].sD = v[13].s7; results[ 7].sE = v[14].s7; results[ 7].sF = v[15].s7;
results[ 8].s0 = v[0].s8; results[ 8].s1 = v[1].s8; results[ 8].s2 = v[2].s8; results[ 8].s3 = v[3].s8; results[ 8].s4 = v[4].s8; results[ 8].s5 = v[5].s8; results[ 8].s6 = v[6].s8; results[ 8].s7 = v[7].s8; results[ 8].s8 = v[8].s8; results[ 8].s9 = v[9].s8; results[ 8].sA = v[10].s8; results[ 8].sB = v[11].s8; results[ 8].sC = v[12].s8; results[ 8].sD = v[13].s8; results[ 8].sE = v[14].s8; results[ 8].sF = v[15].s8;
results[ 9].s0 = v[0].s9; results[ 9].s1 = v[1].s9; results[ 9].s2 = v[2].s9; results[ 9].s3 = v[3].s9; results[ 9].s4 = v[4].s9; results[ 9].s5 = v[5].s9; results[ 9].s6 = v[6].s9; results[ 9].s7 = v[7].s9; results[ 9].s8 = v[8].s9; results[ 9].s9 = v[9].s9; results[ 9].sA = v[10].s9; results[ 9].sB = v[11].s9; results[ 9].sC = v[12].s9; results[ 9].sD = v[13].s9; results[ 9].sE = v[14].s9; results[ 9].sF = v[15].s9;
results[10].s0 = v[0].sA; results[10].s1 = v[1].sA; results[10].s2 = v[2].sA; results[10].s3 = v[3].sA; results[10].s4 = v[4].sA; results[10].s5 = v[5].sA; results[10].s6 = v[6].sA; results[10].s7 = v[7].sA; results[10].s8 = v[8].sA; results[10].s9 = v[9].sA; results[10].sA = v[10].sA; results[10].sB = v[11].sA; results[10].sC = v[12].sA; results[10].sD = v[13].sA; results[10].sE = v[14].sA; results[10].sF = v[15].sA;
results[11].s0 = v[0].sB; results[11].s1 = v[1].sB; results[11].s2 = v[2].sB; results[11].s3 = v[3].sB; results[11].s4 = v[4].sB; results[11].s5 = v[5].sB; results[11].s6 = v[6].sB; results[11].s7 = v[7].sB; results[11].s8 = v[8].sB; results[11].s9 = v[9].sB; results[11].sA = v[10].sB; results[11].sB = v[11].sB; results[11].sC = v[12].sB; results[11].sD = v[13].sB; results[11].sE = v[14].sB; results[11].sF = v[15].sB;
results[12].s0 = v[0].sC; results[12].s1 = v[1].sC; results[12].s2 = v[2].sC; results[12].s3 = v[3].sC; results[12].s4 = v[4].sC; results[12].s5 = v[5].sC; results[12].s6 = v[6].sC; results[12].s7 = v[7].sC; results[12].s8 = v[8].sC; results[12].s9 = v[9].sC; results[12].sA = v[10].sC; results[12].sB = v[11].sC; results[12].sC = v[12].sC; results[12].sD = v[13].sC; results[12].sE = v[14].sC; results[12].sF = v[15].sC;
results[13].s0 = v[0].sD; results[13].s1 = v[1].sD; results[13].s2 = v[2].sD; results[13].s3 = v[3].sD; results[13].s4 = v[4].sD; results[13].s5 = v[5].sD; results[13].s6 = v[6].sD; results[13].s7 = v[7].sD; results[13].s8 = v[8].sD; results[13].s9 = v[9].sD; results[13].sA = v[10].sD; results[13].sB = v[11].sD; results[13].sC = v[12].sD; results[13].sD = v[13].sD; results[13].sE = v[14].sD; results[13].sF = v[15].sD;
results[14].s0 = v[0].sE; results[14].s1 = v[1].sE; results[14].s2 = v[2].sE; results[14].s3 = v[3].sE; results[14].s4 = v[4].sE; results[14].s5 = v[5].sE; results[14].s6 = v[6].sE; results[14].s7 = v[7].sE; results[14].s8 = v[8].sE; results[14].s9 = v[9].sE; results[14].sA = v[10].sE; results[14].sB = v[11].sE; results[14].sC = v[12].sE; results[14].sD = v[13].sE; results[14].sE = v[14].sE; results[14].sF = v[15].sE;
results[15].s0 = v[0].sF; results[15].s1 = v[1].sF; results[15].s2 = v[2].sF; results[15].s3 = v[3].sF; results[15].s4 = v[4].sF; results[15].s5 = v[5].sF; results[15].s6 = v[6].sF; results[15].s7 = v[7].sF; results[15].s8 = v[8].sF; results[15].s9 = v[9].sF; results[15].sA = v[10].sF; results[15].sB = v[11].sF; results[15].sC = v[12].sF; results[15].sD = v[13].sF; results[15].sE = v[14].sF; results[15].sF = v[15].sF;
#endif
// Multiplies by alpha and then stores the results into the destination matrix

View File

@ -24,7 +24,7 @@ R"(
// Transposes a matrix from source to destination. The output is padded with zero values in case the
// destination matrix dimensions are larger than the transposed source matrix dimensions.
INLINE_FUNC void _TransposePadMatrix(__local real* tile,
INLINE_FUNC void _TransposePadMatrix(LOCAL_PTR real* tile,
const int src_one, const int src_two,
const int src_ld, const int src_offset,
__global const real* restrict src,
@ -105,7 +105,7 @@ void TransposePadMatrix(const int src_one, const int src_two,
// Transposes a matrix, while considering possible padding in the source matrix. Data is read from a
// padded source matrix, but only the actual data is written back to the transposed destination
// matrix. This kernel optionally checks for upper/lower triangular matrices.
INLINE_FUNC void _TransposeMatrix(__local real* tile,
INLINE_FUNC void _TransposeMatrix(LOCAL_PTR real* tile,
const int src_one, const int src_two,
const int src_ld, const int src_offset,
__global const real* restrict src,

View File

@ -19,8 +19,8 @@ R"(
// =================================================================================================
// Direct version of the batched GEMM kernel with [A, B] = [non-transposed, non-transposed]
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
__kernel void XgemmDirectBatchedNN(const int kSizeM, const int kSizeN, const int kSizeK,
__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
void XgemmDirectBatchedNN(const int kSizeM, const int kSizeN, const int kSizeK,
const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
@ -40,8 +40,8 @@ __kernel void XgemmDirectBatchedNN(const int kSizeM, const int kSizeN, const int
}
// Direct version of the batched GEMM kernel with [A, B] = [non-transposed, transposed]
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
__kernel void XgemmDirectBatchedNT(const int kSizeM, const int kSizeN, const int kSizeK,
__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
void XgemmDirectBatchedNT(const int kSizeM, const int kSizeN, const int kSizeK,
const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
@ -61,8 +61,8 @@ __kernel void XgemmDirectBatchedNT(const int kSizeM, const int kSizeN, const int
}
// Direct version of the batched GEMM kernel with [A, B] = [transposed, non-transposed]
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
__kernel void XgemmDirectBatchedTN(const int kSizeM, const int kSizeN, const int kSizeK,
__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
void XgemmDirectBatchedTN(const int kSizeM, const int kSizeN, const int kSizeK,
const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
@ -82,8 +82,8 @@ __kernel void XgemmDirectBatchedTN(const int kSizeM, const int kSizeN, const int
}
// Direct version of the batched GEMM kernel with [A, B] = [transposed, transposed]
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
__kernel void XgemmDirectBatchedTT(const int kSizeM, const int kSizeN, const int kSizeK,
__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
void XgemmDirectBatchedTT(const int kSizeM, const int kSizeN, const int kSizeK,
const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,

View File

@ -184,7 +184,7 @@ INLINE_FUNC void GlobalToPrivateCheckedB(const __global real* restrict bgms, rea
// Caches on-chip local memory into per-thread private memory (registers). This function is specific
// for caching the A input matrix.
INLINE_FUNC void LocalToPrivateDirectA(__local real* alm, real apm[MWID], const int kg,
INLINE_FUNC void LocalToPrivateDirectA(LOCAL_PTR real* alm, real apm[MWID], const int kg,
const int a_transpose) {
#pragma unroll
for (int mi=0; mi<MWID; ++mi) {
@ -195,7 +195,7 @@ INLINE_FUNC void LocalToPrivateDirectA(__local real* alm, real apm[MWID], const
}
// Same as above, but now for the B input matrix
INLINE_FUNC void LocalToPrivateDirectB(__local real* blm, real bpm[NWID], const int kg,
INLINE_FUNC void LocalToPrivateDirectB(LOCAL_PTR real* blm, real bpm[NWID], const int kg,
const int b_transpose) {
#pragma unroll
for (int ni=0; ni<NWID; ++ni) {

View File

@ -19,7 +19,7 @@ R"(
// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
// caching the A input matrix.
INLINE_FUNC void GlobalToLocalDirectA(const __global realMD* restrict agm, __local real* alm,
INLINE_FUNC void GlobalToLocalDirectA(const __global realMD* restrict agm, LOCAL_PTR real* alm,
const int a_ld, const int a_offset, const int kwg,
const int a_transpose, const int a_conjugate) {
#if MDIMCD == MDIMAD
@ -90,7 +90,7 @@ INLINE_FUNC void GlobalToLocalDirectA(const __global realMD* restrict agm, __loc
}
// Same as above, but now for the B input matrix
INLINE_FUNC void GlobalToLocalDirectB(const __global realND* restrict bgm, __local real* blm,
INLINE_FUNC void GlobalToLocalDirectB(const __global realND* restrict bgm, LOCAL_PTR real* blm,
const int b_ld, const int b_offset, const int kwg,
const int b_transpose, const int b_conjugate) {
#if MDIMCD == NDIMBD
@ -165,7 +165,7 @@ INLINE_FUNC void GlobalToLocalDirectB(const __global realND* restrict bgm, __loc
// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
// caching the A input matrix. In contrast to the functions above, this function doesn't
// use the vector data-types.
INLINE_FUNC void GlobalToLocalScalarA(const __global real* restrict agms, __local real* alm,
INLINE_FUNC void GlobalToLocalScalarA(const __global real* restrict agms, LOCAL_PTR real* alm,
const int a_ld, const int a_offset, const int kwg,
const int a_transpose, const int a_conjugate) {
#if MDIMCD == MDIMAD
@ -196,7 +196,7 @@ INLINE_FUNC void GlobalToLocalScalarA(const __global real* restrict agms, __loca
}
// Same as above, but now for the B input matrix
INLINE_FUNC void GlobalToLocalScalarB(const __global real* restrict bgms, __local real* blm,
INLINE_FUNC void GlobalToLocalScalarB(const __global real* restrict bgms, LOCAL_PTR real* blm,
const int b_ld, const int b_offset, const int kwg,
const int b_transpose, const int b_conjugate) {
#if MDIMCD == NDIMBD
@ -231,7 +231,7 @@ INLINE_FUNC void GlobalToLocalScalarB(const __global real* restrict bgms, __loca
// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
// caching the A input matrix. In contrast to the functions above, this function performs bounds
// checks and doesn't use the vector data-types.
INLINE_FUNC void GlobalToLocalCheckedA(const __global real* restrict agms, __local real* alm,
INLINE_FUNC void GlobalToLocalCheckedA(const __global real* restrict agms, LOCAL_PTR real* alm,
const int a_ld, const int a_offset, const int kwg,
const int a_transpose, const int a_conjugate,
const int kSizeM, const int kSizeK) {
@ -270,7 +270,7 @@ INLINE_FUNC void GlobalToLocalCheckedA(const __global real* restrict agms, __loc
}
// Same as above, but now for the B input matrix
INLINE_FUNC void GlobalToLocalCheckedB(const __global real* restrict bgms, __local real* blm,
INLINE_FUNC void GlobalToLocalCheckedB(const __global real* restrict bgms, LOCAL_PTR real* blm,
const int b_ld, const int b_offset, const int kwg,
const int b_transpose, const int b_conjugate,
const int kSizeN, const int kSizeK) {

View File

@ -24,7 +24,7 @@ INLINE_FUNC void XgemmDirect(const int kSizeM, const int kSizeN, const int kSize
const __global realMD* restrict agm, const int a_offset, const int a_ld,
const __global realND* restrict bgm, const int b_offset, const int b_ld,
__global real* cgm, const int c_offset, const int c_ld,
__local real* alm, __local real* blm,
LOCAL_PTR real* alm, LOCAL_PTR real* blm,
const int a_transpose, const int b_transpose, const int c_transpose,
const int a_conjugate, const int b_conjugate) {
const real alpha = GetRealArg(arg_alpha);
@ -147,8 +147,8 @@ INLINE_FUNC void XgemmDirect(const int kSizeM, const int kSizeN, const int kSize
// =================================================================================================
// Direct version of the GEMM kernel with [A, B] = [non-transposed, non-transposed]
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
__kernel void XgemmDirectNN(const int kSizeM, const int kSizeN, const int kSizeK,
__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
void XgemmDirectNN(const int kSizeM, const int kSizeN, const int kSizeK,
const real_arg arg_alpha, const real_arg arg_beta,
const __global realMD* restrict agm, const int a_offset, const int a_ld,
const __global realND* restrict bgm, const int b_offset, const int b_ld,
@ -162,8 +162,8 @@ __kernel void XgemmDirectNN(const int kSizeM, const int kSizeN, const int kSizeK
}
// Direct version of the GEMM kernel with [A, B] = [non-transposed, transposed]
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
__kernel void XgemmDirectNT(const int kSizeM, const int kSizeN, const int kSizeK,
__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
void XgemmDirectNT(const int kSizeM, const int kSizeN, const int kSizeK,
const real_arg arg_alpha, const real_arg arg_beta,
const __global realMD* restrict agm, const int a_offset, const int a_ld,
const __global realND* restrict bgm, const int b_offset, const int b_ld,
@ -177,8 +177,8 @@ __kernel void XgemmDirectNT(const int kSizeM, const int kSizeN, const int kSizeK
}
// Direct version of the GEMM kernel with [A, B] = [transposed, non-transposed]
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
__kernel void XgemmDirectTN(const int kSizeM, const int kSizeN, const int kSizeK,
__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
void XgemmDirectTN(const int kSizeM, const int kSizeN, const int kSizeK,
const real_arg arg_alpha, const real_arg arg_beta,
const __global realMD* restrict agm, const int a_offset, const int a_ld,
const __global realND* restrict bgm, const int b_offset, const int b_ld,
@ -192,8 +192,8 @@ __kernel void XgemmDirectTN(const int kSizeM, const int kSizeN, const int kSizeK
}
// Direct version of the GEMM kernel with [A, B] = [transposed, transposed]
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
__kernel void XgemmDirectTT(const int kSizeM, const int kSizeN, const int kSizeK,
__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
void XgemmDirectTT(const int kSizeM, const int kSizeN, const int kSizeK,
const real_arg arg_alpha, const real_arg arg_beta,
const __global realMD* restrict agm, const int a_offset, const int a_ld,
const __global realND* restrict bgm, const int b_offset, const int b_ld,

View File

@ -186,7 +186,7 @@ INLINE_FUNC void InitAccRegisters(realM cpm[NWI][MWI/VWM]) {
// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
// caching the A input matrix.
#if SA == 1
INLINE_FUNC void GlobalToLocalA(const __global realM* restrict agm, __local realM* alm,
INLINE_FUNC void GlobalToLocalA(const __global realM* restrict agm, LOCAL_PTR realM* alm,
const int kSizeM, const int tid, const int kwg) {
const int la0 = tid % MDIMA;
const int la1 = tid / MDIMA;
@ -216,7 +216,7 @@ INLINE_FUNC void GlobalToLocalA(const __global realM* restrict agm, __local real
// Same as above, but now for the B input matrix
#if SB == 1
INLINE_FUNC void GlobalToLocalB(const __global realN* restrict bgm, __local realN* blm,
INLINE_FUNC void GlobalToLocalB(const __global realN* restrict bgm, LOCAL_PTR realN* blm,
const int kSizeN, const int tid, const int kwg) {
const int lb0 = tid % NDIMB;
const int lb1 = tid / NDIMB;
@ -298,7 +298,7 @@ INLINE_FUNC void GlobalToPrivateB(const __global realN* restrict bgm, realN bpm[
// Caches on-chip local memory into per-thread private memory (registers). This function is specific
// for caching the A input matrix.
#if SA == 1
INLINE_FUNC void LocalToPrivateA(__local realM* alm, realM apm[MWI/VWM], const int kg) {
INLINE_FUNC void LocalToPrivateA(LOCAL_PTR realM* alm, realM apm[MWI/VWM], const int kg) {
#pragma unroll
for (int mi=0; mi<MWI/VWM; ++mi) {
#if STRM == 0
@ -313,7 +313,7 @@ INLINE_FUNC void LocalToPrivateA(__local realM* alm, realM apm[MWI/VWM], const i
// Same as above, but now for the B input matrix
#if SB == 1
INLINE_FUNC void LocalToPrivateB(__local realN* blm, realN bpm[NWI/VWN], const int kg) {
INLINE_FUNC void LocalToPrivateB(LOCAL_PTR realN* blm, realN bpm[NWI/VWN], const int kg) {
#pragma unroll
for (int ni=0; ni<NWI/VWN; ++ni) {
#if STRN == 0

View File

@ -22,11 +22,11 @@ INLINE_FUNC void XgemmBody(const int kSizeM, const int kSizeN, const int kSizeK,
const __global realM* restrict agm, const __global realN* restrict bgm,
__global realM* cgm, realM cpm[NWI][MWI/VWM]
#if SA == 1 && SB == 1
, __local realM* alm, __local realN* blm
, LOCAL_PTR realM* alm, LOCAL_PTR realN* blm
#elif SA == 1
, __local realM* alm
, LOCAL_PTR realM* alm
#elif SB == 1
, __local realN* blm
, LOCAL_PTR realN* blm
#endif
) {

View File

@ -0,0 +1,90 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file contains an (incomplete) header to interpret OpenCL kernels as CUDA kernels.
//
// =================================================================================================
// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string
// literal). Comment-out this line for syntax-highlighting when developing.
R"(
// =================================================================================================
// CLBlast specific additions
#define CUDA 1
#define LOCAL_PTR // pointers to local memory don't have to be annotated in CUDA
// Replaces the OpenCL get_xxx_ID with CUDA equivalents
__device__ int get_local_id(const int x) {
if (x == 0) { return threadIdx.x; }
if (x == 1) { return threadIdx.y; }
return threadIdx.z;
}
__device__ int get_group_id(const int x) {
if (x == 0) { return blockIdx.x; }
if (x == 1) { return blockIdx.y; }
return blockIdx.z;
}
__device__ int get_local_size(const int x) {
if (x == 0) { return blockDim.x; }
if (x == 1) { return blockDim.y; }
return blockDim.z;
}
__device__ int get_num_groups(const int x) {
if (x == 0) { return gridDim.x; }
if (x == 1) { return gridDim.y; }
return gridDim.z;
}
__device__ int get_global_size(const int x) {
if (x == 0) { return gridDim.x * blockDim.x; }
if (x == 1) { return gridDim.y * blockDim.y; }
return gridDim.z * blockDim.z;
}
__device__ int get_global_id(const int x) {
if (x == 0) { return blockIdx.x*blockDim.x + threadIdx.x; }
if (x == 1) { return blockIdx.y*blockDim.y + threadIdx.y; }
return blockIdx.z*blockDim.z + threadIdx.z;
}
// Adds the data-types which are not available natively under CUDA
typedef struct { float s0; float s1; float s2; float s3;
float s4; float s5; float s6; float s7; } float8;
typedef struct { float s0; float s1; float s2; float s3;
float s4; float s5; float s6; float s7;
float s8; float s9; float s10; float s11;
float s12; float s13; float s14; float s15; } float16;
typedef struct { double s0; double s1; double s2; double s3;
double s4; double s5; double s6; double s7; } double8;
typedef struct { double s0; double s1; double s2; double s3;
double s4; double s5; double s6; double s7;
double s8; double s9; double s10; double s11;
double s12; double s13; double s14; double s15; } double16;
// Replaces the OpenCL keywords with CUDA equivalent
#define __kernel __placeholder__
#define __global
#define __placeholder__ extern "C" __global__
#define __local __shared__
#define restrict __restrict__
#define __constant const
#define inline __device__ // assumes all device functions are annotated with inline in OpenCL
// Kernel attributes (don't replace currently)
#define reqd_work_group_size(x, y, z)
// Replaces OpenCL synchronisation with CUDA synchronisation
#define barrier(x) __syncthreads()
// =================================================================================================
// End of the C++11 raw string literal
)"
// =================================================================================================

View File

@ -167,6 +167,13 @@ void Routine::InitProgram(std::initializer_list<const char *> source) {
source_string += "#define GLOBAL_MEM_FENCE 1\n";
}
// Optionally adds a translation header from OpenCL kernels to CUDA kernels
#ifdef CUDA_API
source_string +=
#include "kernels/opencl_to_cuda.h"
;
#endif
// Loads the common header (typedefs and defines and such)
source_string +=
#include "kernels/common.opencl"

View File

@ -19,8 +19,7 @@
#include <string>
#include <vector>
#include "clpp11.hpp"
#include "clblast.h"
#include "utilities/utilities.hpp"
#include "database/database.hpp"
namespace clblast {

View File

@ -59,9 +59,9 @@ void XaxpyBatched<T>::DoAxpyBatched(const size_t n, const std::vector<T> &alphas
x_offsets_int[batch] = static_cast<int>(x_offsets[batch]);
y_offsets_int[batch] = static_cast<int>(y_offsets[batch]);
}
auto x_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto y_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto alphas_device = Buffer<T>(context_, BufferAccess::kReadOnly, batch_count);
auto x_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
auto y_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
auto alphas_device = Buffer<T>(context_, BufferAccess::kReadWrite, batch_count);
x_offsets_device.Write(queue_, batch_count, x_offsets_int);
y_offsets_device.Write(queue_, batch_count, y_offsets_int);
alphas_device.Write(queue_, batch_count, alphas);

View File

@ -100,8 +100,8 @@ void XgemmBatched<T>::DoGemmBatched(const Layout layout, const Transpose a_trans
}
// Upload the scalar arguments to the device
auto alphas_device = Buffer<T>(context_, BufferAccess::kReadOnly, batch_count);
auto betas_device = Buffer<T>(context_, BufferAccess::kReadOnly, batch_count);
auto alphas_device = Buffer<T>(context_, BufferAccess::kReadWrite, batch_count);
auto betas_device = Buffer<T>(context_, BufferAccess::kReadWrite, batch_count);
alphas_device.Write(queue_, batch_count, alphas);
betas_device.Write(queue_, batch_count, betas);
@ -200,8 +200,8 @@ void XgemmBatched<T>::BatchedGemmIndirect(const size_t m, const size_t n, const
// to fill it up until it reaches a certain multiple of size (kernel parameter dependent). In
// case nothing has to be done, these kernels can be skipped.
if (!a_no_temp) {
auto a_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto a_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto a_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
auto a_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
a_offsets_device.Write(queue_, batch_count, a_offsets);
a_offsets_i_device.Write(queue_, batch_count, a_offsets_i);
auto eventProcessA = Event();
@ -214,8 +214,8 @@ void XgemmBatched<T>::BatchedGemmIndirect(const size_t m, const size_t n, const
// As above, but now for matrix B
if (!b_no_temp) {
auto b_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto b_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto b_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
auto b_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
b_offsets_device.Write(queue_, batch_count, b_offsets);
b_offsets_i_device.Write(queue_, batch_count, b_offsets_i);
auto eventProcessB = Event();
@ -227,8 +227,8 @@ void XgemmBatched<T>::BatchedGemmIndirect(const size_t m, const size_t n, const
}
// As above, but now for matrix C
auto c_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto c_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto c_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
auto c_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
if (!c_no_temp) {
c_offsets_device.Write(queue_, batch_count, c_offsets);
c_offsets_i_device.Write(queue_, batch_count, c_offsets_i);
@ -297,9 +297,9 @@ void XgemmBatched<T>::BatchedGemmDirect(const size_t m, const size_t n, const si
const size_t batch_count) {
// Uploads the offsets to the device
auto a_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto b_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto c_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto a_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
auto b_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
auto c_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
a_offsets_device.Write(queue_, batch_count, a_offsets);
b_offsets_device.Write(queue_, batch_count, b_offsets);
c_offsets_device.Write(queue_, batch_count, c_offsets);

View File

@ -0,0 +1,76 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file contains all the includes of all the routines in CLBlast.
//
// =================================================================================================
#ifndef CLBLAST_ROUTINES_ROUTINES_H_
#define CLBLAST_ROUTINES_ROUTINES_H_
// BLAS level-1 includes
#include "routines/level1/xswap.hpp"
#include "routines/level1/xscal.hpp"
#include "routines/level1/xcopy.hpp"
#include "routines/level1/xaxpy.hpp"
#include "routines/level1/xdot.hpp"
#include "routines/level1/xdotu.hpp"
#include "routines/level1/xdotc.hpp"
#include "routines/level1/xnrm2.hpp"
#include "routines/level1/xasum.hpp"
#include "routines/level1/xsum.hpp" // non-BLAS routine
#include "routines/level1/xamax.hpp"
#include "routines/level1/xamin.hpp" // non-BLAS routine
#include "routines/level1/xmax.hpp" // non-BLAS routine
#include "routines/level1/xmin.hpp" // non-BLAS routine
// BLAS level-2 includes
#include "routines/level2/xgemv.hpp"
#include "routines/level2/xgbmv.hpp"
#include "routines/level2/xhemv.hpp"
#include "routines/level2/xhbmv.hpp"
#include "routines/level2/xhpmv.hpp"
#include "routines/level2/xsymv.hpp"
#include "routines/level2/xsbmv.hpp"
#include "routines/level2/xspmv.hpp"
#include "routines/level2/xtrmv.hpp"
#include "routines/level2/xtbmv.hpp"
#include "routines/level2/xtpmv.hpp"
#include "routines/level2/xtrsv.hpp"
#include "routines/level2/xger.hpp"
#include "routines/level2/xgeru.hpp"
#include "routines/level2/xgerc.hpp"
#include "routines/level2/xher.hpp"
#include "routines/level2/xhpr.hpp"
#include "routines/level2/xher2.hpp"
#include "routines/level2/xhpr2.hpp"
#include "routines/level2/xsyr.hpp"
#include "routines/level2/xspr.hpp"
#include "routines/level2/xsyr2.hpp"
#include "routines/level2/xspr2.hpp"
// BLAS level-3 includes
#include "routines/level3/xgemm.hpp"
#include "routines/level3/xsymm.hpp"
#include "routines/level3/xhemm.hpp"
#include "routines/level3/xsyrk.hpp"
#include "routines/level3/xherk.hpp"
#include "routines/level3/xsyr2k.hpp"
#include "routines/level3/xher2k.hpp"
#include "routines/level3/xtrmm.hpp"
#include "routines/level3/xtrsm.hpp"
// Level-x includes (non-BLAS)
#include "routines/levelx/xomatcopy.hpp"
#include "routines/levelx/xim2col.hpp"
#include "routines/levelx/xaxpybatched.hpp"
#include "routines/levelx/xgemmbatched.hpp"
// CLBLAST_ROUTINES_ROUTINES_H_
#endif

View File

@ -15,7 +15,7 @@
#ifndef CLBLAST_BUFFER_TEST_H_
#define CLBLAST_BUFFER_TEST_H_
#include "clblast.h"
#include "utilities/utilities.hpp"
namespace clblast {
// =================================================================================================

View File

@ -16,8 +16,7 @@
#ifndef CLBLAST_EXCEPTIONS_H_
#define CLBLAST_EXCEPTIONS_H_
#include "clpp11.hpp"
#include "clblast.h"
#include "utilities/utilities.hpp"
namespace clblast {
// =================================================================================================

View File

@ -413,13 +413,17 @@ std::string GetDeviceVendor(const Device& device) {
// Mid-level info
std::string GetDeviceArchitecture(const Device& device) {
auto device_architecture = std::string{""};
if (device.HasExtension(kKhronosAttributesNVIDIA)) {
#ifdef CUDA_API
device_architecture = device.NVIDIAComputeCapability();
}
else if (device.HasExtension(kKhronosAttributesAMD)) {
device_architecture = device.Name(); // Name is architecture for AMD APP and AMD ROCm
}
// Note: no else - 'device_architecture' might be the empty string
#else
if (device.HasExtension(kKhronosAttributesNVIDIA)) {
device_architecture = device.NVIDIAComputeCapability();
}
else if (device.HasExtension(kKhronosAttributesAMD)) {
device_architecture = device.Name(); // Name is architecture for AMD APP and AMD ROCm
}
// Note: no else - 'device_architecture' might be the empty string
#endif
for (auto &find_and_replace : device_mapping::kArchitectureNames) { // replacing to common names
if (device_architecture == find_and_replace.first) { device_architecture = find_and_replace.second; }

View File

@ -21,8 +21,13 @@
#include <complex>
#include <random>
#include "clpp11.hpp"
#include "clblast.h"
#ifdef OPENCL_API
#include "clpp11.hpp"
#include "clblast.h"
#elif CUDA_API
#include "cupp11.hpp"
#include "clblast_cuda.h"
#endif
#include "clblast_half.h"
#include "utilities/clblast_exceptions.hpp"
#include "utilities/msvc.hpp"

View File

@ -15,21 +15,16 @@
// Main function (not within the clblast namespace)
int main(int argc, char *argv[]) {
auto errors = size_t{0};
// Tests GEMM based on the 'in-direct' kernel
errors += clblast::RunTests<clblast::TestXgemm<1, float>, float, float>(argc, argv, false, "SGEMM");
errors += clblast::RunTests<clblast::TestXgemm<1, double>, double, double>(argc, argv, true, "DGEMM");
errors += clblast::RunTests<clblast::TestXgemm<1, clblast::float2>, clblast::float2, clblast::float2>(argc, argv, true, "CGEMM");
errors += clblast::RunTests<clblast::TestXgemm<1, clblast::double2>, clblast::double2, clblast::double2>(argc, argv, true, "ZGEMM");
errors += clblast::RunTests<clblast::TestXgemm<1, clblast::half>, clblast::half, clblast::half>(argc, argv, true, "HGEMM");
// Tests GEMM based on the 'direct' kernel
errors += clblast::RunTests<clblast::TestXgemm<2, float>, float, float>(argc, argv, true, "SGEMM");
errors += clblast::RunTests<clblast::TestXgemm<2, double>, double, double>(argc, argv, true, "DGEMM");
errors += clblast::RunTests<clblast::TestXgemm<2, clblast::float2>, clblast::float2, clblast::float2>(argc, argv, true, "CGEMM");
errors += clblast::RunTests<clblast::TestXgemm<2, clblast::double2>, clblast::double2, clblast::double2>(argc, argv, true, "ZGEMM");
errors += clblast::RunTests<clblast::TestXgemm<2, clblast::half>, clblast::half, clblast::half>(argc, argv, true, "HGEMM");
if (errors > 0) { return 1; } else { return 0; }
}

View File

@ -241,36 +241,22 @@ void TestBlas<T,U>::TestInvalid(std::vector<Arguments<U>> &test_vector, const st
std::cout << std::flush;
}
// Creates the OpenCL buffers. Note: we are not using the C++ version since we explicitly
// Creates the buffers. Note: we are not using the cxpp11.h C++ version since we explicitly
// want to be able to create invalid buffers (no error checking here).
auto x1 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.x_size*sizeof(T), nullptr,nullptr);
auto y1 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.y_size*sizeof(T), nullptr,nullptr);
auto a1 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.a_size*sizeof(T), nullptr,nullptr);
auto b1 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.b_size*sizeof(T), nullptr,nullptr);
auto c1 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.c_size*sizeof(T), nullptr,nullptr);
auto ap1 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.ap_size*sizeof(T), nullptr,nullptr);
auto d1 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.scalar_size*sizeof(T), nullptr,nullptr);
auto x_vec1 = Buffer<T>(x1);
auto y_vec1 = Buffer<T>(y1);
auto a_mat1 = Buffer<T>(a1);
auto b_mat1 = Buffer<T>(b1);
auto c_mat1 = Buffer<T>(c1);
auto ap_mat1 = Buffer<T>(ap1);
auto scalar1 = Buffer<T>(d1);
auto x2 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.x_size*sizeof(T), nullptr,nullptr);
auto y2 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.y_size*sizeof(T), nullptr,nullptr);
auto a2 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.a_size*sizeof(T), nullptr,nullptr);
auto b2 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.b_size*sizeof(T), nullptr,nullptr);
auto c2 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.c_size*sizeof(T), nullptr,nullptr);
auto ap2 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.ap_size*sizeof(T), nullptr,nullptr);
auto d2 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.scalar_size*sizeof(T), nullptr,nullptr);
auto x_vec2 = Buffer<T>(x2);
auto y_vec2 = Buffer<T>(y2);
auto a_mat2 = Buffer<T>(a2);
auto b_mat2 = Buffer<T>(b2);
auto c_mat2 = Buffer<T>(c2);
auto ap_mat2 = Buffer<T>(ap2);
auto scalar2 = Buffer<T>(d2);
auto x_vec1 = CreateInvalidBuffer<T>(context_, args.x_size);
auto y_vec1 = CreateInvalidBuffer<T>(context_, args.y_size);
auto a_mat1 = CreateInvalidBuffer<T>(context_, args.a_size);
auto b_mat1 = CreateInvalidBuffer<T>(context_, args.b_size);
auto c_mat1 = CreateInvalidBuffer<T>(context_, args.c_size);
auto ap_mat1 = CreateInvalidBuffer<T>(context_, args.ap_size);
auto scalar1 = CreateInvalidBuffer<T>(context_, args.scalar_size);
auto x_vec2 = CreateInvalidBuffer<T>(context_, args.x_size);
auto y_vec2 = CreateInvalidBuffer<T>(context_, args.y_size);
auto a_mat2 = CreateInvalidBuffer<T>(context_, args.a_size);
auto b_mat2 = CreateInvalidBuffer<T>(context_, args.b_size);
auto c_mat2 = CreateInvalidBuffer<T>(context_, args.c_size);
auto ap_mat2 = CreateInvalidBuffer<T>(context_, args.ap_size);
auto scalar2 = CreateInvalidBuffer<T>(context_, args.scalar_size);
auto buffers1 = Buffers<T>{x_vec1, y_vec1, a_mat1, b_mat1, c_mat1, ap_mat1, scalar1};
auto buffers2 = Buffers<T>{x_vec2, y_vec2, a_mat2, b_mat2, c_mat2, ap_mat2, scalar2};

View File

@ -22,13 +22,13 @@
#include <vector>
#include <memory>
#include "utilities/utilities.hpp"
#include "test/test_utilities.hpp"
// The libraries
#ifdef CLBLAST_REF_CLBLAS
#include <clBLAS.h>
#endif
#include "clblast.h"
namespace clblast {
// =================================================================================================

View File

@ -32,7 +32,7 @@
#include <clBLAS.h>
#endif
#include "test/wrapper_cuda.hpp"
#include "clblast.h"
#include "utilities/utilities.hpp"
namespace clblast {
// =================================================================================================

View File

@ -69,13 +69,21 @@ class TestXamax {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Amax<T>(args.n,
buffers.scalar(), args.imax_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Amax<T>(args.n,
buffers.scalar(), args.imax_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Amax<T>(args.n,
buffers.scalar(), args.imax_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -69,13 +69,21 @@ class TestXasum {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Asum<T>(args.n,
buffers.scalar(), args.asum_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
auto status = Asum<T>(args.n,
buffers.scalar(), args.asum_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Asum<T>(args.n,
buffers.scalar(), args.asum_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -70,13 +70,21 @@ class TestXaxpy {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Axpy(args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Axpy(args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Axpy(args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -69,13 +69,21 @@ class TestXcopy {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Copy<T>(args.n,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Copy<T>(args.n,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Copy<T>(args.n,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -73,14 +73,23 @@ class TestXdot {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Dot<T>(args.n,
buffers.scalar(), args.dot_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Dot<T>(args.n,
buffers.scalar(), args.dot_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Dot<T>(args.n,
buffers.scalar(), args.dot_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -73,14 +73,23 @@ class TestXdotc {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Dotc<T>(args.n,
buffers.scalar(), args.dot_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Dotc<T>(args.n,
buffers.scalar(), args.dot_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Dotc<T>(args.n,
buffers.scalar(), args.dot_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -73,14 +73,23 @@ class TestXdotu {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Dotu<T>(args.n,
buffers.scalar(), args.dot_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Dotu<T>(args.n,
buffers.scalar(), args.dot_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Dotu<T>(args.n,
buffers.scalar(), args.dot_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -69,13 +69,21 @@ class TestXnrm2 {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Nrm2<T>(args.n,
buffers.scalar(), args.nrm2_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Nrm2<T>(args.n,
buffers.scalar(), args.nrm2_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Nrm2<T>(args.n,
buffers.scalar(), args.nrm2_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -66,12 +66,19 @@ class TestXscal {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Scal(args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Scal(args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Scal(args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -69,13 +69,21 @@ class TestXswap {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Swap<T>(args.n,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Swap<T>(args.n,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Swap<T>(args.n,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -81,15 +81,25 @@ class TestXgbmv {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Gbmv(args.layout, args.a_transpose,
args.m, args.n, args.kl, args.ku, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Gbmv(args.layout, args.a_transpose,
args.m, args.n, args.kl, args.ku, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Gbmv(args.layout, args.a_transpose,
args.m, args.n, args.kl, args.ku, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -81,15 +81,25 @@ class TestXgemv {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Gemv(args.layout, args.a_transpose,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Gemv(args.layout, args.a_transpose,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Gemv(args.layout, args.a_transpose,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -77,15 +77,25 @@ class TestXger {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Ger(args.layout,
args.m, args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Ger(args.layout,
args.m, args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Ger(args.layout,
args.m, args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -77,15 +77,25 @@ class TestXgerc {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Gerc(args.layout,
args.m, args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Gerc(args.layout,
args.m, args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Gerc(args.layout,
args.m, args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -77,15 +77,25 @@ class TestXgeru {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Geru(args.layout,
args.m, args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Geru(args.layout,
args.m, args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Geru(args.layout,
args.m, args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -75,15 +75,25 @@ class TestXhbmv {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Hbmv(args.layout, args.triangle,
args.n, args.kl, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Hbmv(args.layout, args.triangle,
args.n, args.kl, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Hbmv(args.layout, args.triangle,
args.n, args.kl, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -75,15 +75,25 @@ class TestXhemv {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Hemv(args.layout, args.triangle,
args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Hemv(args.layout, args.triangle,
args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Hemv(args.layout, args.triangle,
args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -71,14 +71,23 @@ class TestXher {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<U> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Her(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Her(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Her(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -75,15 +75,25 @@ class TestXher2 {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Her2(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Her2(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Her2(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -75,15 +75,25 @@ class TestXhpmv {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Hpmv(args.layout, args.triangle,
args.n, args.alpha,
buffers.ap_mat(), args.ap_offset,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Hpmv(args.layout, args.triangle,
args.n, args.alpha,
buffers.ap_mat(), args.ap_offset,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Hpmv(args.layout, args.triangle,
args.n, args.alpha,
buffers.ap_mat(), args.ap_offset,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -71,14 +71,23 @@ class TestXhpr {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<U> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Hpr(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.ap_mat(), args.ap_offset,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Hpr(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.ap_mat(), args.ap_offset,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Hpr(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.ap_mat(), args.ap_offset,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -75,15 +75,25 @@ class TestXhpr2 {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Hpr2(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.ap_mat(), args.ap_offset,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Hpr2(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.ap_mat(), args.ap_offset,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Hpr2(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.ap_mat(), args.ap_offset,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -75,15 +75,25 @@ class TestXsbmv {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Sbmv(args.layout, args.triangle,
args.n, args.kl, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Sbmv(args.layout, args.triangle,
args.n, args.kl, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Sbmv(args.layout, args.triangle,
args.n, args.kl, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -75,15 +75,25 @@ class TestXspmv {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Spmv(args.layout, args.triangle,
args.n, args.alpha,
buffers.ap_mat(), args.ap_offset,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Spmv(args.layout, args.triangle,
args.n, args.alpha,
buffers.ap_mat(), args.ap_offset,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Spmv(args.layout, args.triangle,
args.n, args.alpha,
buffers.ap_mat(), args.ap_offset,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -71,14 +71,23 @@ class TestXspr {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Spr(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.ap_mat(), args.ap_offset,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Spr(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.ap_mat(), args.ap_offset,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Spr(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.ap_mat(), args.ap_offset,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -75,15 +75,25 @@ class TestXspr2 {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Spr2(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.ap_mat(), args.ap_offset,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Spr2(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.ap_mat(), args.ap_offset,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Spr2(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.ap_mat(), args.ap_offset,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -75,15 +75,25 @@ class TestXsymv {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Symv(args.layout, args.triangle,
args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Symv(args.layout, args.triangle,
args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Symv(args.layout, args.triangle,
args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc, args.beta,
buffers.y_vec(), args.y_offset, args.y_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -71,14 +71,23 @@ class TestXsyr {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Syr(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Syr(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Syr(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -75,15 +75,25 @@ class TestXsyr2 {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Syr2(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Syr2(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Syr2(args.layout, args.triangle,
args.n, args.alpha,
buffers.x_vec(), args.x_offset, args.x_inc,
buffers.y_vec(), args.y_offset, args.y_inc,
buffers.a_mat(), args.a_offset, args.a_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -70,14 +70,23 @@ class TestXtbmv {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Tbmv<T>(args.layout, args.triangle, args.a_transpose, args.diagonal,
args.n, args.kl,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Tbmv<T>(args.layout, args.triangle, args.a_transpose, args.diagonal,
args.n, args.kl,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Tbmv<T>(args.layout, args.triangle, args.a_transpose, args.diagonal,
args.n, args.kl,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -70,14 +70,23 @@ class TestXtpmv {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Tpmv<T>(args.layout, args.triangle, args.a_transpose, args.diagonal,
args.n,
buffers.ap_mat(), args.ap_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Tpmv<T>(args.layout, args.triangle, args.a_transpose, args.diagonal,
args.n,
buffers.ap_mat(), args.ap_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Tpmv<T>(args.layout, args.triangle, args.a_transpose, args.diagonal,
args.n,
buffers.ap_mat(), args.ap_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -70,14 +70,23 @@ class TestXtrmv {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Trmv<T>(args.layout, args.triangle, args.a_transpose, args.diagonal,
args.n,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Trmv<T>(args.layout, args.triangle, args.a_transpose, args.diagonal,
args.n,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Trmv<T>(args.layout, args.triangle, args.a_transpose, args.diagonal,
args.n,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -85,14 +85,23 @@ class TestXtrsv {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Trsv<T>(args.layout, args.triangle, args.a_transpose, args.diagonal,
args.n,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Trsv<T>(args.layout, args.triangle, args.a_transpose, args.diagonal,
args.n,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Trsv<T>(args.layout, args.triangle, args.a_transpose, args.diagonal,
args.n,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.x_vec(), args.x_offset, args.x_inc,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -90,15 +90,25 @@ class TestXgemm {
{{"XGEMM_MIN_INDIRECT_SIZE", switch_threshold}});
if (override_status != StatusCode::kSuccess) { return override_status; }
}
auto queue_plain = queue();
auto event = cl_event{};
auto status = Gemm(args.layout, args.a_transpose, args.b_transpose,
args.m, args.n, args.k, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Gemm(args.layout, args.a_transpose, args.b_transpose,
args.m, args.n, args.k, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Gemm(args.layout, args.a_transpose, args.b_transpose,
args.m, args.n, args.k, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -83,15 +83,25 @@ class TestXhemm {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Hemm(args.layout, args.side, args.triangle,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Hemm(args.layout, args.side, args.triangle,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Hemm(args.layout, args.side, args.triangle,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -81,16 +81,26 @@ class TestXher2k {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<U> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto alpha2 = T{args.alpha, args.alpha};
auto status = Her2k(args.layout, args.triangle, args.a_transpose,
args.n, args.k, alpha2,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Her2k(args.layout, args.triangle, args.a_transpose,
args.n, args.k, alpha2,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Her2k(args.layout, args.triangle, args.a_transpose,
args.n, args.k, alpha2,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -74,14 +74,23 @@ class TestXherk {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<U> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Herk(args.layout, args.triangle, args.a_transpose,
args.n, args.k, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Herk(args.layout, args.triangle, args.a_transpose,
args.n, args.k, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Herk(args.layout, args.triangle, args.a_transpose,
args.n, args.k, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -83,15 +83,25 @@ class TestXsymm {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Symm(args.layout, args.side, args.triangle,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Symm(args.layout, args.side, args.triangle,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Symm(args.layout, args.side, args.triangle,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -81,15 +81,25 @@ class TestXsyr2k {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Syr2k(args.layout, args.triangle, args.a_transpose,
args.n, args.k, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Syr2k(args.layout, args.triangle, args.a_transpose,
args.n, args.k, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Syr2k(args.layout, args.triangle, args.a_transpose,
args.n, args.k, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -74,14 +74,23 @@ class TestXsyrk {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Syrk(args.layout, args.triangle, args.a_transpose,
args.n, args.k, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Syrk(args.layout, args.triangle, args.a_transpose,
args.n, args.k, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Syrk(args.layout, args.triangle, args.a_transpose,
args.n, args.k, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld, args.beta,
buffers.c_mat(), args.c_offset, args.c_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -74,14 +74,23 @@ class TestXtrmm {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Trmm(args.layout, args.side, args.triangle, args.a_transpose, args.diagonal,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Trmm(args.layout, args.side, args.triangle, args.a_transpose, args.diagonal,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Trmm(args.layout, args.side, args.triangle, args.a_transpose, args.diagonal,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -85,14 +85,23 @@ class TestXtrsm {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Trsm(args.layout, args.side, args.triangle, args.a_transpose, args.diagonal,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Trsm(args.layout, args.side, args.triangle, args.a_transpose, args.diagonal,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Trsm(args.layout, args.side, args.triangle, args.a_transpose, args.diagonal,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -83,14 +83,23 @@ class TestXaxpyBatched {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = AxpyBatched(args.n, args.alphas.data(),
buffers.x_vec(), args.x_offsets.data(), args.x_inc,
buffers.y_vec(), args.y_offsets.data(), args.y_inc,
args.batch_count,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = AxpyBatched(args.n, args.alphas.data(),
buffers.x_vec(), args.x_offsets.data(), args.x_inc,
buffers.y_vec(), args.y_offsets.data(), args.y_inc,
args.batch_count,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = AxpyBatched(args.n, args.alphas.data(),
buffers.x_vec(), args.x_offsets.data(), args.x_inc,
buffers.y_vec(), args.y_offsets.data(), args.y_inc,
args.batch_count,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -108,8 +108,6 @@ class TestXgemmBatched {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
// Relaxed requirement on ld_a and ld_b within the library, this is here to match clBLAS
auto a_rotated = (args.layout == Layout::kColMajor && args.a_transpose != Transpose::kNo) ||
(args.layout == Layout::kRowMajor && args.a_transpose == Transpose::kNo);
@ -119,14 +117,27 @@ class TestXgemmBatched {
auto b_one = (!b_rotated) ? args.k : args.n;
if (args.a_ld < a_one) { return StatusCode::kInvalidLeadDimA; }
if (args.b_ld < b_one) { return StatusCode::kInvalidLeadDimB; }
auto status = GemmBatched(args.layout, args.a_transpose, args.b_transpose,
args.m, args.n, args.k, args.alphas.data(),
buffers.a_mat(), args.a_offsets.data(), args.a_ld,
buffers.b_mat(), args.b_offsets.data(), args.b_ld, args.betas.data(),
buffers.c_mat(), args.c_offsets.data(), args.c_ld,
args.batch_count,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = GemmBatched(args.layout, args.a_transpose, args.b_transpose,
args.m, args.n, args.k, args.alphas.data(),
buffers.a_mat(), args.a_offsets.data(), args.a_ld,
buffers.b_mat(), args.b_offsets.data(), args.b_ld, args.betas.data(),
buffers.c_mat(), args.c_offsets.data(), args.c_ld,
args.batch_count,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = GemmBatched(args.layout, args.a_transpose, args.b_transpose,
args.m, args.n, args.k, args.alphas.data(),
buffers.a_mat(), args.a_offsets.data(), args.a_ld,
buffers.b_mat(), args.b_offsets.data(), args.b_ld, args.betas.data(),
buffers.c_mat(), args.c_offsets.data(), args.c_ld,
args.batch_count,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -84,17 +84,29 @@ public:
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Im2col<T>(args.channels, args.height, args.width,
args.kernel_h, args.kernel_w,
args.pad_h, args.pad_w,
args.stride_h, args.stride_w,
args.dilation_h, args.dilation_w,
buffers.a_mat(), args.a_offset,
buffers.b_mat(), args.b_offset,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Im2col<T>(args.channels, args.height, args.width,
args.kernel_h, args.kernel_w,
args.pad_h, args.pad_w,
args.stride_h, args.stride_w,
args.dilation_h, args.dilation_w,
buffers.a_mat(), args.a_offset,
buffers.b_mat(), args.b_offset,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Im2col<T>(args.channels, args.height, args.width,
args.kernel_h, args.kernel_w,
args.pad_h, args.pad_w,
args.stride_h, args.stride_w,
args.dilation_h, args.dilation_w,
buffers.a_mat(), args.a_offset,
buffers.b_mat(), args.b_offset,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -164,14 +164,23 @@ class TestXinvert {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
try {
auto event = cl_event{};
auto inverter = Xinvert<T>(queue, &event);
inverter.InvertMatrixDiagonalBlocks(args.layout, args.triangle, args.diagonal,
args.n, args.m,
buffers.a_mat, args.a_offset, args.a_ld,
buffers.b_mat);
clWaitForEvents(1, &event);
clReleaseEvent(event);
#ifdef OPENCL_API
auto event = cl_event{};
auto inverter = Xinvert<T>(queue, &event);
inverter.InvertMatrixDiagonalBlocks(args.layout, args.triangle, args.diagonal,
args.n, args.m,
buffers.a_mat, args.a_offset, args.a_ld,
buffers.b_mat);
clWaitForEvents(1, &event);
clReleaseEvent(event);
#elif CUDA_API
auto inverter = Xinvert<T>(queue, nullptr);
inverter.InvertMatrixDiagonalBlocks(args.layout, args.triangle, args.diagonal,
args.n, args.m,
buffers.a_mat, args.a_offset, args.a_ld,
buffers.b_mat);
cuStreamSynchronize(queue());
#endif
} catch (...) { return DispatchException(); }
return StatusCode::kSuccess;
}

View File

@ -126,14 +126,23 @@ class TestXomatcopy {
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Omatcopy<T>(args.layout, args.a_transpose,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Omatcopy<T>(args.layout, args.a_transpose,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Omatcopy<T>(args.layout, args.a_transpose,
args.m, args.n, args.alpha,
buffers.a_mat(), args.a_offset, args.a_ld,
buffers.b_mat(), args.b_offset, args.b_ld,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}

View File

@ -88,27 +88,29 @@ void FloatToHalfBuffer(std::vector<half>& result, const std::vector<float>& sour
}
// As above, but now for OpenCL data-types instead of std::vectors
Buffer<float> HalfToFloatBuffer(const Buffer<half>& source, RawCommandQueue queue_raw) {
const auto size = source.GetSize() / sizeof(half);
auto queue = Queue(queue_raw);
auto context = queue.GetContext();
auto source_cpu = std::vector<half>(size);
source.Read(queue, size, source_cpu);
auto result_cpu = HalfToFloatBuffer(source_cpu);
auto result = Buffer<float>(context, size);
result.Write(queue, size, result_cpu);
return result;
}
void FloatToHalfBuffer(Buffer<half>& result, const Buffer<float>& source, RawCommandQueue queue_raw) {
const auto size = source.GetSize() / sizeof(float);
auto queue = Queue(queue_raw);
auto context = queue.GetContext();
auto source_cpu = std::vector<float>(size);
source.Read(queue, size, source_cpu);
auto result_cpu = std::vector<half>(size);
FloatToHalfBuffer(result_cpu, source_cpu);
result.Write(queue, size, result_cpu);
}
#ifdef OPENCL_API
Buffer<float> HalfToFloatBuffer(const Buffer<half>& source, RawCommandQueue queue_raw) {
const auto size = source.GetSize() / sizeof(half);
auto queue = Queue(queue_raw);
auto context = queue.GetContext();
auto source_cpu = std::vector<half>(size);
source.Read(queue, size, source_cpu);
auto result_cpu = HalfToFloatBuffer(source_cpu);
auto result = Buffer<float>(context, size);
result.Write(queue, size, result_cpu);
return result;
}
void FloatToHalfBuffer(Buffer<half>& result, const Buffer<float>& source, RawCommandQueue queue_raw) {
const auto size = source.GetSize() / sizeof(float);
auto queue = Queue(queue_raw);
auto context = queue.GetContext();
auto source_cpu = std::vector<float>(size);
source.Read(queue, size, source_cpu);
auto result_cpu = std::vector<half>(size);
FloatToHalfBuffer(result_cpu, source_cpu);
result.Write(queue, size, result_cpu);
}
#endif
// =================================================================================================
} // namespace clblast

View File

@ -89,8 +89,25 @@ std::vector<float> HalfToFloatBuffer(const std::vector<half>& source);
void FloatToHalfBuffer(std::vector<half>& result, const std::vector<float>& source);
// As above, but now for OpenCL data-types instead of std::vectors
Buffer<float> HalfToFloatBuffer(const Buffer<half>& source, RawCommandQueue queue_raw);
void FloatToHalfBuffer(Buffer<half>& result, const Buffer<float>& source, RawCommandQueue queue_raw);
#ifdef OPENCL_API
Buffer<float> HalfToFloatBuffer(const Buffer<half>& source, RawCommandQueue queue_raw);
void FloatToHalfBuffer(Buffer<half>& result, const Buffer<float>& source, RawCommandQueue queue_raw);
#endif
// =================================================================================================
// Creates a buffer but don't test for validity. That's the reason this is not using the clpp11.h or
// cupp11.h interface.
template <typename T>
Buffer<T> CreateInvalidBuffer(const Context& context, const size_t size) {
#ifdef OPENCL_API
auto raw_buffer = clCreateBuffer(context(), CL_MEM_READ_WRITE, size * sizeof(T), nullptr, nullptr);
#elif CUDA_API
CUdeviceptr raw_buffer;
cuMemAlloc(&raw_buffer, size * sizeof(T));
#endif
return Buffer<T>(raw_buffer);
}
// =================================================================================================
} // namespace clblast

View File

@ -22,6 +22,7 @@
#include "utilities/utilities.hpp"
#ifdef CLBLAST_REF_CUBLAS
#define CUDA_NO_HALF
#include <cuda_runtime.h>
#include <cublas_v2.h>
#endif