Added convgemm skeleton, test infrastructure, and first reference implementation

pull/319/head
Cedric Nugteren 2018-05-06 11:35:34 +02:00
parent 2776d76176
commit 2d1f6ba7fe
13 changed files with 508 additions and 17 deletions

View File

@@ -204,7 +204,7 @@ set(LEVEL1_ROUTINES xswap xscal xcopy xaxpy xdot xdotu xdotc xnrm2 xasum xamax)
set(LEVEL2_ROUTINES xgemv xgbmv xhemv xhbmv xhpmv xsymv xsbmv xspmv xtrmv xtbmv xtpmv xtrsv
xger xgeru xgerc xher xhpr xher2 xhpr2 xsyr xspr xsyr2 xspr2)
set(LEVEL3_ROUTINES xgemm xsymm xhemm xsyrk xherk xsyr2k xher2k xtrmm xtrsm)
-set(LEVELX_ROUTINES xhad xomatcopy xim2col xaxpybatched xgemmbatched xgemmstridedbatched)
+set(LEVELX_ROUTINES xhad xomatcopy xim2col xconvgemm xaxpybatched xgemmbatched xgemmstridedbatched)
set(ROUTINES ${LEVEL1_ROUTINES} ${LEVEL2_ROUTINES} ${LEVEL3_ROUTINES} ${LEVELX_ROUTINES})
set(PRECISIONS 32 64 3232 6464 16)

View File

@@ -3072,6 +3072,76 @@ Arguments to IM2COL:
xCONVGEMM: Batched convolution as GEMM (non-BLAS function)
-------------
Integrates im2col and GEMM for batched 3D convolution, in which _im_ is the 4D input tensor, _kernel_ the 4D kernel weights tensor, and _result_ the 4D output tensor. A hypothetical usage sketch follows the argument list below.
C++ API:
```
template <typename T>
StatusCode Convgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
const cl_mem im_buffer, const size_t im_offset,
const cl_mem kernel_buffer, const size_t kernel_offset,
cl_mem result_buffer, const size_t result_offset,
cl_command_queue* queue, cl_event* event)
```
C API:
```
CLBlastStatusCode CLBlastSconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
const cl_mem im_buffer, const size_t im_offset,
const cl_mem kernel_buffer, const size_t kernel_offset,
cl_mem result_buffer, const size_t result_offset,
cl_command_queue* queue, cl_event* event)
CLBlastStatusCode CLBlastDconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
const cl_mem im_buffer, const size_t im_offset,
const cl_mem kernel_buffer, const size_t kernel_offset,
cl_mem result_buffer, const size_t result_offset,
cl_command_queue* queue, cl_event* event)
CLBlastStatusCode CLBlastCconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
const cl_mem im_buffer, const size_t im_offset,
const cl_mem kernel_buffer, const size_t kernel_offset,
cl_mem result_buffer, const size_t result_offset,
cl_command_queue* queue, cl_event* event)
CLBlastStatusCode CLBlastZconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
const cl_mem im_buffer, const size_t im_offset,
const cl_mem kernel_buffer, const size_t kernel_offset,
cl_mem result_buffer, const size_t result_offset,
cl_command_queue* queue, cl_event* event)
CLBlastStatusCode CLBlastHconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
const cl_mem im_buffer, const size_t im_offset,
const cl_mem kernel_buffer, const size_t kernel_offset,
cl_mem result_buffer, const size_t result_offset,
cl_command_queue* queue, cl_event* event)
```
Arguments to CONVGEMM:
* `const size_t channels`: Integer size argument. This value must be positive.
* `const size_t height`: Integer size argument. This value must be positive.
* `const size_t width`: Integer size argument. This value must be positive.
* `const size_t kernel_h`: Integer size argument. This value must be positive.
* `const size_t kernel_w`: Integer size argument. This value must be positive.
* `const size_t pad_h`: Integer size argument. This value must be positive.
* `const size_t pad_w`: Integer size argument. This value must be positive.
* `const size_t stride_h`: Integer size argument. This value must be positive.
* `const size_t stride_w`: Integer size argument. This value must be positive.
* `const size_t dilation_h`: Integer size argument. This value must be positive.
* `const size_t dilation_w`: Integer size argument. This value must be positive.
* `const size_t num_kernels`: Integer size argument. This value must be positive.
* `const size_t batch_count`: Integer size argument. This value must be positive.
* `const cl_mem im_buffer`: OpenCL buffer to store the input im tensor.
* `const size_t im_offset`: The offset in elements from the start of the input im tensor.
* `const cl_mem kernel_buffer`: OpenCL buffer to store the input kernel tensor.
* `const size_t kernel_offset`: The offset in elements from the start of the input kernel tensor.
* `cl_mem result_buffer`: OpenCL buffer to store the output result tensor.
* `const size_t result_offset`: The offset in elements from the start of the output result tensor.
* `cl_command_queue* queue`: Pointer to an OpenCL command queue associated with a context and device to execute the routine on.
* `cl_event* event`: Pointer to an OpenCL event to be able to wait for completion of the routine's OpenCL kernel(s). This is an optional argument.
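As a minimal sketch of how this new C++ API is meant to be called (not part of this commit): the snippet below uses the first available OpenCL device and zero buffer offsets. At this stage of the PR the routine body still throws `kNotImplemented`, so the call is expected to return that status; OpenCL error checking is omitted for brevity.
```
#include <cstdio>
#include <vector>
#include <clblast.h>

int main() {
  // Set up the first OpenCL platform/device, a context, and a queue
  cl_platform_id platform; clGetPlatformIDs(1, &platform, nullptr);
  cl_device_id device; clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, 1, &device, nullptr);
  auto context = clCreateContext(nullptr, 1, &device, nullptr, nullptr, nullptr);
  auto queue = clCreateCommandQueue(context, device, 0, nullptr);

  // Example configuration: a batch of 2 images of 3x8x8, convolved with 4 kernels of 3x3x3
  const size_t channels = 3, height = 8, width = 8;
  const size_t kernel_h = 3, kernel_w = 3, pad_h = 0, pad_w = 0;
  const size_t stride_h = 1, stride_w = 1, dilation_h = 1, dilation_w = 1;
  const size_t num_kernels = 4, batch_count = 2;
  const size_t output_h = (height + 2 * pad_h - dilation_h * (kernel_h - 1) - 1) / stride_h + 1;
  const size_t output_w = (width + 2 * pad_w - dilation_w * (kernel_w - 1) - 1) / stride_w + 1;

  // Host-side tensors: NCHW input, KCHW kernel weights, NCHW output
  std::vector<float> im(batch_count * channels * height * width, 1.0f);
  std::vector<float> kernel(num_kernels * channels * kernel_h * kernel_w, 0.1f);
  std::vector<float> result(batch_count * num_kernels * output_h * output_w, 0.0f);

  // Device buffers, filled with the host data
  auto im_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, im.size() * sizeof(float), nullptr, nullptr);
  auto kernel_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, kernel.size() * sizeof(float), nullptr, nullptr);
  auto result_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, result.size() * sizeof(float), nullptr, nullptr);
  clEnqueueWriteBuffer(queue, im_buffer, CL_TRUE, 0, im.size() * sizeof(float), im.data(), 0, nullptr, nullptr);
  clEnqueueWriteBuffer(queue, kernel_buffer, CL_TRUE, 0, kernel.size() * sizeof(float), kernel.data(), 0, nullptr, nullptr);

  // The convgemm call itself, with all offsets zero
  cl_event event = nullptr;
  const auto status = clblast::Convgemm<float>(channels, height, width, kernel_h, kernel_w,
                                               pad_h, pad_w, stride_h, stride_w,
                                               dilation_h, dilation_w, num_kernels, batch_count,
                                               im_buffer, 0, kernel_buffer, 0, result_buffer, 0,
                                               &queue, &event);
  if (status == clblast::StatusCode::kSuccess) {
    clWaitForEvents(1, &event);
    clReleaseEvent(event);
    clEnqueueReadBuffer(queue, result_buffer, CL_TRUE, 0, result.size() * sizeof(float), result.data(), 0, nullptr, nullptr);
  }
  printf("Convgemm returned status %d\n", static_cast<int>(status));

  // Clean-up
  clReleaseMemObject(im_buffer); clReleaseMemObject(kernel_buffer); clReleaseMemObject(result_buffer);
  clReleaseCommandQueue(queue); clReleaseContext(context);
  return 0;
}
```
The same argument order maps one-to-one onto the C API entry points such as `CLBlastSconvgemm`.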
xAXPYBATCHED: Batched version of AXPY
-------------

View File

@@ -181,7 +181,7 @@ ROUTINES = [
Routine(True, True, 0, False, "x", "had", T, [S,D,C,Z,H], ["n"], [], ["x","y"], ["z"], [xn,yn,zn], ["alpha","beta"], "", "Element-wise vector product (Hadamard)", "Performs the Hadamard element-wise product _z = alpha * x * y + beta * z_, in which _x_, _y_, and _z_ are vectors and _alpha_ and _beta_ are scalar constants.", []),
Routine(True, True, 0, False, "x", "omatcopy", T, [S,D,C,Z,H], ["m","n"], ["layout","a_transpose"], ["a"], ["b"], [amn,bnma], ["alpha"], "", "Scaling and out-place transpose/copy (non-BLAS function)", "Performs scaling and out-of-place transposition/copying of matrices according to _B = alpha*op(A)_, in which _A_ is an input matrix (_m_ rows by _n_ columns), _B_ an output matrix, and _alpha_ a scalar value. The operation _op_ can be a normal matrix copy, a transposition or a conjugate transposition.", [ald_m, bld_n]),
Routine(True, True, 0, False, "x", "im2col", T, [S,D,C,Z,H], im2col_constants, [], ["im"], ["col"], [im,col], [""], "", "Im2col function (non-BLAS function)", "Performs the im2col algorithm, in which _im_ is the input matrix and _col_ is the output matrix.", []),
-Routine(False, True, 0, False, "x", "convgemm", T, [S,D,C,Z,H], convgemm_constants, [], ["im","kernel"], ["result"], [imb,kernel,result],[""], "", "Batched convolution as GEMM (non-BLAS function)", "Integrates im2col and GEMM for batched convolution, in which _im_ is the 4D input tensor, _kernel_ the 3D kernel weights tensor, and _result_ the 4D output tensor.", []),
+Routine(True, True, 0, False, "x", "convgemm", T, [S,D,C,Z,H], convgemm_constants, [], ["im","kernel"], ["result"], [imb,kernel,result],[""], "", "Batched convolution as GEMM (non-BLAS function)", "Integrates im2col and GEMM for batched 3D convolution, in which _im_ is the 4D input tensor, _kernel_ the 4D kernel weights tensor, and _result_ the 4D output tensor.", []),
# Batched routines:
Routine(True, True, 1, False, "x", "axpy", T, [S,D,C,Z,H], ["n"], [], ["x"], ["y"], [xn,yn], ["alpha"], "", "Batched version of AXPY", "As AXPY, but multiple operations are batched together for better performance.", []),
Routine(True, True, 1, False, "x", "gemm", T, [S,D,C,Z,H], ["m","n","k"], ["layout","a_transpose","b_transpose"], ["a","b"], ["c"], [amk,bkn,cmn], ["alpha","beta"], "", "Batched version of GEMM", "As GEMM, but multiple operations are batched together for better performance.", [ald_transa_m_k, bld_transb_k_n, cld_m]),

View File

@@ -2254,12 +2254,20 @@ template StatusCode PUBLIC_API Im2col<half>(const size_t, const size_t, const si
// Batched convolution as GEMM (non-BLAS function): SCONVGEMM/DCONVGEMM/CCONVGEMM/ZCONVGEMM/HCONVGEMM
template <typename T>
-StatusCode Convgemm(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
-const cl_mem, const size_t,
-const cl_mem, const size_t,
-cl_mem, const size_t,
-cl_command_queue*, cl_event*) {
-return StatusCode::kNotImplemented;
+StatusCode Convgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
+const cl_mem im_buffer, const size_t im_offset,
+const cl_mem kernel_buffer, const size_t kernel_offset,
+cl_mem result_buffer, const size_t result_offset,
+cl_command_queue* queue, cl_event* event) {
+try {
+auto queue_cpp = Queue(*queue);
+auto routine = Xconvgemm<T>(queue_cpp, event);
+routine.DoConvgemm(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, num_kernels, batch_count,
+Buffer<T>(im_buffer), im_offset,
+Buffer<T>(kernel_buffer), kernel_offset,
+Buffer<T>(result_buffer), result_offset);
+return StatusCode::kSuccess;
+} catch (...) { return DispatchException(); }
}
template StatusCode PUBLIC_API Convgemm<float>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
const cl_mem, const size_t,

View File

@@ -2352,12 +2352,22 @@ template StatusCode PUBLIC_API Im2col<half>(const size_t, const size_t, const si
// Batched convolution as GEMM (non-BLAS function): SCONVGEMM/DCONVGEMM/CCONVGEMM/ZCONVGEMM/HCONVGEMM
template <typename T>
-StatusCode Convgemm(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
-const CUdeviceptr, const size_t,
-const CUdeviceptr, const size_t,
-CUdeviceptr, const size_t,
-const CUcontext, const CUdevice) {
-return StatusCode::kNotImplemented;
+StatusCode Convgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
+const CUdeviceptr im_buffer, const size_t im_offset,
+const CUdeviceptr kernel_buffer, const size_t kernel_offset,
+CUdeviceptr result_buffer, const size_t result_offset,
+const CUcontext context, const CUdevice device) {
+try {
+const auto context_cpp = Context(context);
+const auto device_cpp = Device(device);
+auto queue_cpp = Queue(context_cpp, device_cpp);
+auto routine = Xconvgemm<T>(queue_cpp, nullptr);
+routine.DoConvgemm(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, num_kernels, batch_count,
+Buffer<T>(im_buffer), im_offset,
+Buffer<T>(kernel_buffer), kernel_offset,
+Buffer<T>(result_buffer), result_offset);
+return StatusCode::kSuccess;
+} catch (...) { return DispatchException(); }
}
template StatusCode PUBLIC_API Convgemm<float>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
const CUdeviceptr, const size_t,

View File

@@ -0,0 +1,68 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements the Xconvgemm class (see the header for information about the class).
//
// =================================================================================================
#include "routines/levelx/xconvgemm.hpp"
#include <string>
#include <vector>
namespace clblast {
// =================================================================================================
// Constructor: forwards to base class constructor
template <typename T>
Xconvgemm<T>::Xconvgemm(Queue &queue, EventPointer event, const std::string &name):
Routine(queue, event, name, {"Copy"}, PrecisionValue<T>(), {}, {
#include "../../kernels/levelx/im2col.opencl"
}) {
}
// =================================================================================================
template <typename T>
void Xconvgemm<T>::DoConvgemm(const size_t channels, const size_t height, const size_t width,
const size_t kernel_h, const size_t kernel_w, const size_t pad_h,
const size_t pad_w, const size_t stride_h, const size_t stride_w,
const size_t dilation_h, const size_t dilation_w,
const size_t num_kernels, const size_t batch_count,
const Buffer<T> &im_buffer, const size_t im_offset,
const Buffer<T> &kernel_buffer, const size_t kernel_offset,
const Buffer<T> &result_buffer, const size_t result_offset) {
// Makes sure all dimensions are larger than zero
if ((channels == 0) || (height == 0) || (width == 0) || (num_kernels == 0) || (batch_count == 0)) {
throw BLASError(StatusCode::kInvalidDimension);
}
// Sets the output height and width: the input size plus padding, minus the dilated kernel
// footprint, stepped by the stride (clamped to a minimum of one)
const auto size_h = height + 2 * pad_h;
const auto padding_h = dilation_h * (kernel_h - 1) + 1; // extent of the dilated kernel
const auto output_h = (size_h >= padding_h) ? (size_h - padding_h) / stride_h + 1 : 1;
const auto size_w = width + 2 * pad_w;
const auto padding_w = dilation_w * (kernel_w - 1) + 1; // extent of the dilated kernel
const auto output_w = (size_w >= padding_w) ? (size_w - padding_w) / stride_w + 1 : 1;
// The remainder of the routine is not yet implemented in this skeleton
throw BLASError(StatusCode::kNotImplemented);
}
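As a quick check of the output-size arithmetic above: with height = 32, pad_h = 1, kernel_h = 3, dilation_h = 1 and stride_h = 1, this gives size_h = 34 and padding_h = 3 (the extent of the dilated kernel), so output_h = (34 - 3) / 1 + 1 = 32, a 'same'-size convolution; the same configuration with stride_h = 2 gives output_h = (34 - 3) / 2 + 1 = 16.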
// =================================================================================================
// Compiles the templated class
template class Xconvgemm<half>;
template class Xconvgemm<float>;
template class Xconvgemm<double>;
template class Xconvgemm<float2>;
template class Xconvgemm<double2>;
// =================================================================================================
} // namespace clblast
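The commit title and the documentation describe convgemm as integrating im2col and GEMM. For orientation, here is a minimal host-side sketch (not part of this commit) of that decomposition, expressed with the public `Im2col` and `Gemm` entry points that CLBlast already ships; the dedicated Xconvgemm routine exists precisely to avoid the per-batch round trips and the large `col` temporary this naive composition needs. The helper name, the caller-provided `col_buffer`, and the assumed Caffe-style column layout (`channels * kernel_h * kernel_w` rows by `output_h * output_w` columns, row-major) are illustrative assumptions.
```
#include <cstddef>
#include <clblast.h>

// Hypothetical helper: batched convolution as im2col followed by GEMM, one batch item at a time.
// 'col_buffer' must hold at least channels * kernel_h * kernel_w * output_h * output_w elements.
clblast::StatusCode ConvViaIm2colGemm(
    const size_t channels, const size_t height, const size_t width,
    const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w,
    const size_t stride_h, const size_t stride_w,
    const size_t dilation_h, const size_t dilation_w,
    const size_t num_kernels, const size_t batch_count,
    const cl_mem im_buffer, const cl_mem kernel_buffer, cl_mem result_buffer,
    cl_mem col_buffer, cl_command_queue* queue) {
  const auto output_h = (height + 2 * pad_h - dilation_h * (kernel_h - 1) - 1) / stride_h + 1;
  const auto output_w = (width + 2 * pad_w - dilation_w * (kernel_w - 1) - 1) / stride_w + 1;
  const auto m = num_kernels;                     // one output channel per kernel
  const auto n = output_h * output_w;             // one column per output pixel
  const auto k = channels * kernel_h * kernel_w;  // dot-product length per output value
  for (auto batch_id = size_t{0}; batch_id < batch_count; ++batch_id) {
    // Unfolds one NCHW input image into the k-by-n 'col' matrix...
    auto status = clblast::Im2col<float>(
        channels, height, width, kernel_h, kernel_w, pad_h, pad_w,
        stride_h, stride_w, dilation_h, dilation_w,
        im_buffer, batch_id * channels * height * width,
        col_buffer, 0, queue, nullptr);
    if (status != clblast::StatusCode::kSuccess) { return status; }
    // ...and multiplies it by the KCHW kernel tensor viewed as an m-by-k row-major matrix
    status = clblast::Gemm<float>(
        clblast::Layout::kRowMajor, clblast::Transpose::kNo, clblast::Transpose::kNo,
        m, n, k, 1.0f,
        kernel_buffer, 0, k,
        col_buffer, 0, n,
        0.0f,
        result_buffer, batch_id * m * n, n,
        queue, nullptr);
    if (status != clblast::StatusCode::kSuccess) { return status; }
  }
  return clblast::StatusCode::kSuccess;
}
```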

View File

@@ -0,0 +1,48 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements the Xconvgemm routine. The precision is implemented as a template argument.
// This implements batched convolution of a 4D input 'image' tensor with a 4D input 'kernel'
// tensor, resulting in a 4D output 'result' tensor.
//
// =================================================================================================
#ifndef CLBLAST_ROUTINES_XCONVGEMM_H_
#define CLBLAST_ROUTINES_XCONVGEMM_H_
#include "routine.hpp"
namespace clblast {
// =================================================================================================
// See comment at top of file for a description of the class
template <typename T>
class Xconvgemm: public Routine {
public:
// Constructor
Xconvgemm(Queue &queue, EventPointer event, const std::string &name = "CONVGEMM");
// Templated-precision implementation of the routine
void DoConvgemm(const size_t channels, const size_t height, const size_t width,
const size_t kernel_h, const size_t kernel_w,
const size_t pad_h, const size_t pad_w,
const size_t stride_h, const size_t stride_w,
const size_t dilation_h, const size_t dilation_w,
const size_t num_kernels, const size_t batch_count,
const Buffer<T> &im_buffer, const size_t im_offset,
const Buffer<T> &kernel_buffer, const size_t kernel_offset,
const Buffer<T> &result_buffer, const size_t result_offset);
};
// =================================================================================================
} // namespace clblast
// CLBLAST_ROUTINES_XCONVGEMM_H_
#endif

View File

@@ -70,6 +70,7 @@
#include "routines/levelx/xhad.hpp"
#include "routines/levelx/xomatcopy.hpp"
#include "routines/levelx/xim2col.hpp"
#include "routines/levelx/xconvgemm.hpp"
#include "routines/levelx/xaxpybatched.hpp"
#include "routines/levelx/xgemmbatched.hpp"
#include "routines/levelx/xgemmstridedbatched.hpp"

View File

@@ -84,6 +84,7 @@ constexpr auto kArgImaxOffset = "offimax";
constexpr auto kArgAlpha = "alpha";
constexpr auto kArgBeta = "beta";
constexpr auto kArgBatchCount = "batch_num";
constexpr auto kArgNumKernels = "num_kernels";
// Constants for im2col
constexpr auto kArgChannels = "channels";
@@ -195,7 +196,7 @@ struct Arguments {
size_t imax_offset = 0;
T alpha = ConstantOne<T>();
T beta = ConstantOne<T>();
-// Arguments for im2col
+// Arguments for im2col and convgemm
size_t channels = 1;
size_t height = 1;
size_t width = 1;
@@ -207,6 +208,7 @@ struct Arguments {
size_t stride_w = 1;
size_t dilation_h = 1;
size_t dilation_w = 1;
size_t num_kernels = 1;
// Batch-specific arguments
size_t batch_count = 1;
std::vector<size_t> x_offsets; // = {0};

View File

@@ -0,0 +1,26 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// =================================================================================================
#include "test/correctness/testblas.hpp"
#include "test/routines/levelx/xconvgemm.hpp"
// Main function (not within the clblast namespace)
int main(int argc, char *argv[]) {
auto errors = size_t{0};
errors += clblast::RunTests<clblast::TestXconvgemm<float>, float, float>(argc, argv, false, "SCONVGEMM");
errors += clblast::RunTests<clblast::TestXconvgemm<double>, double, double>(argc, argv, true, "DCONVGEMM");
errors += clblast::RunTests<clblast::TestXconvgemm<clblast::float2>, clblast::float2, clblast::float2>(argc, argv, true, "CCONVGEMM");
errors += clblast::RunTests<clblast::TestXconvgemm<clblast::double2>, clblast::double2, clblast::double2>(argc, argv, true, "ZCONVGEMM");
errors += clblast::RunTests<clblast::TestXconvgemm<clblast::half>, clblast::half, clblast::half>(argc, argv, true, "HCONVGEMM");
if (errors > 0) { return 1; } else { return 0; }
}
// =================================================================================================

View File

@@ -60,6 +60,7 @@ class TestBlas: public Tester<T,U> {
static const std::vector<size_t> kDilationSizes;
static const std::vector<size_t> kKernelSizes;
static const std::vector<size_t> kBatchCounts;
static const std::vector<size_t> kNumKernels;
const std::vector<size_t> kOffsets;
const std::vector<U> kAlphaValues;
const std::vector<U> kBetaValues;
@@ -136,6 +137,7 @@ template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kBatc
template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kPadSizes = { 0, 1 };
template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kDilationSizes = { 1, 2 };
template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kKernelSizes = { 1, 3 };
template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kNumKernels = { 1, 2 };
// Test settings for the invalid tests
template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kInvalidIncrements = { 0, 1 };
@@ -241,6 +243,7 @@ size_t RunTests(int argc, char *argv[], const bool silent, const std::string &na
auto dilation_hs = std::vector<size_t>{args.dilation_h};
auto dilation_ws = std::vector<size_t>{args.dilation_w};
auto batch_counts = std::vector<size_t>{args.batch_count};
auto num_kernelss = std::vector<size_t>{args.num_kernels};
auto x_sizes = std::vector<size_t>{args.x_size};
auto y_sizes = std::vector<size_t>{args.y_size};
auto a_sizes = std::vector<size_t>{args.a_size};
@@ -296,6 +299,7 @@ size_t RunTests(int argc, char *argv[], const bool silent, const std::string &na
if (option == kArgDilationH) { dilation_hs = tester.kDilationSizes; }
if (option == kArgDilationW) { dilation_ws = tester.kDilationSizes; }
if (option == kArgBatchCount) { batch_counts = tester.kBatchCounts; }
if (option == kArgNumKernels) { num_kernelss = tester.kNumKernels; }
if (option == kArgXOffset) { x_sizes = tester.kVecSizes; }
if (option == kArgYOffset) { y_sizes = tester.kVecSizes; }
@@ -350,8 +354,10 @@ size_t RunTests(int argc, char *argv[], const bool silent, const std::string &na
for (auto &dilation_h: dilation_hs) { r_args.dilation_h = dilation_h;
for (auto &dilation_w: dilation_ws) { r_args.dilation_w = dilation_w;
for (auto &batch_count: batch_counts) { r_args.batch_count = batch_count;
-C::SetSizes(r_args, tester.queue_);
-regular_test_vector.push_back(r_args);
+for (auto &num_kernels: num_kernelss) { r_args.num_kernels = num_kernels;
+C::SetSizes(r_args, tester.queue_);
+regular_test_vector.push_back(r_args);
+}
}
}
}

View File

@@ -0,0 +1,33 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// =================================================================================================
#include "test/performance/client.hpp"
#include "test/routines/levelx/xconvgemm.hpp"
// Main function (not within the clblast namespace)
int main(int argc, char *argv[]) {
const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv);
switch(clblast::GetPrecision(command_line_args, clblast::Precision::kSingle)) {
case clblast::Precision::kHalf:
clblast::RunClient<clblast::TestXconvgemm<clblast::half>, clblast::half, clblast::half>(argc, argv); break;
case clblast::Precision::kSingle:
clblast::RunClient<clblast::TestXconvgemm<float>, float, float>(argc, argv); break;
case clblast::Precision::kDouble:
clblast::RunClient<clblast::TestXconvgemm<double>, double, double>(argc, argv); break;
case clblast::Precision::kComplexSingle:
clblast::RunClient<clblast::TestXconvgemm<clblast::float2>, clblast::float2, clblast::float2>(argc, argv); break;
case clblast::Precision::kComplexDouble:
clblast::RunClient<clblast::TestXconvgemm<clblast::double2>, clblast::double2, clblast::double2>(argc, argv); break;
}
return 0;
}
// =================================================================================================

View File

@@ -0,0 +1,219 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements a class with static methods to describe the Xconvgemm routine. Examples of
// such 'descriptions' are how to calculate the size of a buffer or how to run the routine. These
// static methods are used by the correctness tester and the performance tester.
//
// =================================================================================================
#ifndef CLBLAST_TEST_ROUTINES_XCONVGEMM_H_
#define CLBLAST_TEST_ROUTINES_XCONVGEMM_H_
#include "test/routines/common.hpp"
namespace clblast {
// =================================================================================================
// See comment at top of file for a description of the class
template <typename T>
class TestXconvgemm {
public:
// The BLAS level: 4 for the extra routines
static size_t BLASLevel() { return 4; }
// The list of arguments relevant for this routine
static std::vector<std::string> GetOptions() {
return {kArgChannels, kArgHeight, kArgWidth, kArgKernelH, kArgKernelW, kArgPadH, kArgPadW,
kArgStrideH, kArgStrideW, kArgDilationH, kArgDilationW, kArgNumKernels, kArgBatchCount,
kArgAOffset, kArgBOffset, kArgCOffset};
}
static std::vector<std::string> BuffersIn() { return {kBufMatA, kBufMatB, kBufMatC}; }
static std::vector<std::string> BuffersOut() { return {kBufMatC}; }
// Describes how to obtain the sizes of the buffers
static size_t OutputHeight(const Arguments<T> &args) {
const auto size = args.height + 2 * args.pad_h;
const auto padding = args.dilation_h * (args.kernel_h - 1) + 1;
if (size >= padding) { return (size - padding) / args.stride_h + 1; }
return 1;
}
static size_t OutputWidth(const Arguments<T> &args) {
const auto size = args.width + 2 * args.pad_w;
const auto padding = args.dilation_w * (args.kernel_w - 1) + 1;
if (size >= padding) { return (size - padding) / args.stride_w + 1; }
return 1;
}
static size_t NumPatches(const Arguments<T> &args) {
return OutputHeight(args) * OutputWidth(args) * args.channels;
}
static size_t GetSizeA(const Arguments<T> &args) { // 4D: NCHW == batch-channel-height-width
return args.batch_count * args.channels * args.height * args.width + args.a_offset;
}
static size_t GetSizeB(const Arguments<T> &args) { // 4D: KCHW == kernel-channel-height-width
return args.num_kernels * args.channels * args.kernel_h * args.kernel_w + args.b_offset;
}
static size_t GetSizeC(const Arguments<T> &args) { // 4D: NCHW == batch-channel-height-width
return args.batch_count * args.num_kernels * OutputHeight(args) * OutputWidth(args) + args.c_offset;
}
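As a sanity check on these size formulas: for batch_count = 2, channels = 3, height = width = 8, kernel_h = kernel_w = 3, zero padding, unit stride and dilation, num_kernels = 4 and zero offsets, the output is 6x6 per kernel (output_h = output_w = (8 - 3) / 1 + 1 = 6), so the buffers hold 2 * 3 * 8 * 8 = 384, 4 * 3 * 3 * 3 = 108, and 2 * 4 * 6 * 6 = 288 elements respectively.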
// Describes how to set the sizes of all the buffers
static void SetSizes(Arguments<T> &args, Queue&) {
args.a_size = GetSizeA(args);
args.b_size = GetSizeB(args);
args.c_size = GetSizeC(args);
}
// Describes what the default values of the leading dimensions of the matrices are
static size_t DefaultLDA(const Arguments<T> &) { return 1; } // N/A for this routine
static size_t DefaultLDB(const Arguments<T> &) { return 1; } // N/A for this routine
static size_t DefaultLDC(const Arguments<T> &) { return 1; } // N/A for this routine
// Describes which transpose options are relevant for this routine
using Transposes = std::vector<Transpose>;
static Transposes GetATransposes(const Transposes &) { return {}; } // N/A for this routine
static Transposes GetBTransposes(const Transposes &) { return {}; } // N/A for this routine
// Describes how to prepare the input data
static void PrepareData(const Arguments<T>&, Queue&, const int, std::vector<T>&,
std::vector<T>&, std::vector<T>&, std::vector<T>&, std::vector<T>&,
std::vector<T>&, std::vector<T>&) {} // N/A for this routine
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
#ifdef OPENCL_API
auto queue_plain = queue();
auto event = cl_event{};
auto status = Convgemm<T>(args.channels, args.height, args.width,
args.kernel_h, args.kernel_w,
args.pad_h, args.pad_w,
args.stride_h, args.stride_w,
args.dilation_h, args.dilation_w,
args.num_kernels, args.batch_count,
buffers.a_mat(), args.a_offset,
buffers.b_mat(), args.b_offset,
buffers.c_mat(), args.c_offset,
&queue_plain, &event);
if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
#elif CUDA_API
auto status = Convgemm<T>(args.channels, args.height, args.width,
args.kernel_h, args.kernel_w,
args.pad_h, args.pad_w,
args.stride_h, args.stride_w,
args.dilation_h, args.dilation_w,
args.num_kernels, args.batch_count,
buffers.a_mat(), args.a_offset,
buffers.b_mat(), args.b_offset,
buffers.c_mat(), args.c_offset,
queue.GetContext()(), queue.GetDevice()());
cuStreamSynchronize(queue());
#endif
return status;
}
// Describes how to run a naive version of the routine (for correctness/performance comparison).
// Note that a proper clBLAS or CPU BLAS comparison is not available for non-BLAS routines.
static StatusCode RunReference1(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto buffers_host = BuffersHost<T>();
DeviceToHost(args, buffers, buffers_host, queue, BuffersIn());
const auto status = RunReference(args, buffers_host);
HostToDevice(args, buffers, buffers_host, queue, BuffersOut());
return status;
}
static StatusCode RunReference2(const Arguments<T> &args, BuffersHost<T> &buffers_host, Queue&) {
return RunReference(args, buffers_host);
}
static StatusCode RunReference3(const Arguments<T> &, BuffersCUDA<T> &, Queue &) {
return StatusCode::kUnknownError;
}
// Describes how to download the results of the computation (more importantly: which buffer)
static std::vector<T> DownloadResult(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
std::vector<T> result(args.c_size, static_cast<T>(0));
buffers.c_mat.Read(queue, args.c_size, result);
return result;
}
// Describes how to compute the indices of the result buffer
static size_t ResultID1(const Arguments<T> &args) { return OutputHeight(args) * OutputWidth(args); }
static size_t ResultID2(const Arguments<T> &args) { return args.num_kernels * args.batch_count; }
static size_t GetResultIndex(const Arguments<T> &args, const size_t id1, const size_t id2) {
return id1 + OutputHeight(args) * OutputWidth(args) * id2 + args.c_offset;
}
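In other words, the tester sweeps one index over the output_h * output_w positions of a single output plane and a second index over all num_kernels * batch_count planes, which together cover every element the routine writes; in the 6x6-output example above that is 36 * 8 = 288 positions, exactly the size of the result buffer.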
// Describes how to compute performance metrics
static size_t GetFlops(const Arguments<T> &args) {
return args.batch_count; // TODO: actual count is 2 * batch_count * num_kernels * output_h * output_w * channels * kernel_h * kernel_w
}
static size_t GetBytes(const Arguments<T> &args) {
return (GetSizeA(args) + GetSizeB(args) + GetSizeC(args)) * sizeof(T);
}
};
// =================================================================================================
template <typename T>
StatusCode RunReference(const Arguments<T> &args, BuffersHost<T> &buffers_host) {
const auto output_h = TestXconvgemm<T>::OutputHeight(args);
const auto output_w = TestXconvgemm<T>::OutputWidth(args);
for (auto batch_id = size_t{0}; batch_id < args.batch_count; ++batch_id) {
for (auto co_id = size_t{0}; co_id < args.num_kernels; ++co_id) { // output channels == num-kernels
for (auto ho_id = size_t{0}; ho_id < output_h; ++ho_id) { // output height
for (auto wo_id = size_t{0}; wo_id < output_w; ++wo_id) { // output width
auto result = ConstantZero<T>();
// 3D convolution
for (auto ci_id = size_t{0}; ci_id < args.channels; ++ci_id) { // input channels
for (auto kh_id = size_t{0}; kh_id < args.kernel_h; ++kh_id) { // kernel height
for (auto kw_id = size_t{0}; kw_id < args.kernel_w; ++kw_id) { // kernel width
// Retrieves the value from the input image. Padding can push the index below zero,
// so compute it in signed arithmetic before the bounds check: the original all-size_t
// expression made 'hi_id >= 0' trivially true and relied on unsigned wrap-around.
const auto hi_id = static_cast<int>(kh_id * args.dilation_h + args.stride_h * ho_id) - static_cast<int>(args.pad_h);
const auto wi_id = static_cast<int>(kw_id * args.dilation_w + args.stride_w * wo_id) - static_cast<int>(args.pad_w);
if (hi_id >= 0 && hi_id < static_cast<int>(args.height) &&
wi_id >= 0 && wi_id < static_cast<int>(args.width)) {
const auto input_index = wi_id + args.width * (
hi_id + args.height * (
ci_id + args.channels * (
batch_id)));
const auto input_value = buffers_host.a_mat[input_index + args.a_offset];
// Multiplies with the kernel tensor
const auto kernel_index = kw_id + args.kernel_w * (
kh_id + args.kernel_h * (
ci_id + args.channels * (
co_id)));
const auto kernel_value = buffers_host.b_mat[kernel_index + args.b_offset];
result += input_value * kernel_value;
}
}
}
}
// Sets the output value (NCHW == batch-channel-height-width)
const auto output_index = wo_id + output_w * (
ho_id + output_h * (
co_id + args.num_kernels * (
batch_id)));
buffers_host.c_mat[output_index + args.c_offset] = result;
}
}
}
}
return StatusCode::kSuccess;
}
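Both the input and output index computations above are the usual NCHW linearization index(n, c, h, w) = w + W * (h + H * (c + C * n)), with the kernel id in place of the batch id for the KCHW weights; the guarded bounds check is what implements zero padding, since out-of-image taps simply contribute nothing to the sum.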
// =================================================================================================
} // namespace clblast
// CLBLAST_TEST_ROUTINES_XCONVGEMM_H_
#endif