Added method selection option to switch between im2col and single-kernel approach for convgemm

pull/319/head
Cedric Nugteren 2018-05-21 11:28:11 +02:00
parent 37cabd4f1f
commit 5d87abf780
4 changed files with 99 additions and 55 deletions

View File

@ -9,6 +9,8 @@
//
// This file contains an implementation of 3D convolution on a 4D image using GEMM kernels. It
// uses parameters from the direct GEMM kernel. This is the part with the loads from memory (1/2).
// This uses "CONVGEMM_WITH_IM2COL" as a switch to select between direct convgemm or first running
// the im2col kernel to create a 'col' temporary matrix.
//
// =================================================================================================
@ -17,7 +19,7 @@
R"(
// =================================================================================================
#if defined(ROUTINE_CONVGEMM)
#if defined(ROUTINE_CONVGEMM) && !defined(CONVGEMM_WITH_IM2COL)
// Loads global off-chip memory into thread-private register files. This function is specific for
// loading the image input tensor. This includes a bounds check.

View File

@ -9,6 +9,8 @@
//
// This file contains an implementation of 3D convolution on a 4D image using GEMM kernels. It
// uses parameters from the direct GEMM kernel. This part contains the main kernel (2/2).
// This uses "CONVGEMM_WITH_IM2COL" as a switch to select between direct convgemm or first running
// the im2col kernel to create a 'col' temporary matrix.
//
// =================================================================================================
@ -22,28 +24,37 @@ R"(
// ConvGEMM kernel
__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
void Xconvgemm(const int num_patches, const int num_kernels, const int patch_size,
const __global realMD* restrict colgm, const int col_offset, const int col_stride,
const __global realND* restrict kernelgm, const int kernel_offset,
__global real* resultgm, const int result_offset, const int result_stride,
#if defined(CONVGEMM_WITH_IM2COL)
const __global realMD* restrict colgm, const int col_offset, const int col_stride)
#else
const __global realMD* restrict imagegm, const int image_offset,
const int input_h, const int input_w, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const __global realMD* restrict imagegm, const int image_offset,
const int output_h, const int output_w) {
const int output_h, const int output_w)
#endif
{
// Batch offsets
const int batch = get_group_id(2);
const int image_offset_batch = image_offset + channels * input_h * input_w * batch;
const int col_offset_batch = col_offset + col_stride * batch;
#if defined(CONVGEMM_WITH_IM2COL)
const int col_offset_batch = col_offset + col_stride * batch;
#else
const int image_offset_batch = image_offset + channels * input_h * input_w * batch;
#endif
const int result_offset_batch = result_offset + result_stride * batch;
__local real alm[WGD * (WGD + PADA)];
__local real blm[WGD * (WGD + PADB)];
// Extra pointers to scalar versions of global memory
const __global real* restrict colgms = (const __global real* restrict) colgm;
#if defined(CONVGEMM_WITH_IM2COL)
const __global real* restrict colgms = (const __global real* restrict) colgm;
#endif
const __global real* restrict kernelgms = (const __global real* restrict) kernelgm;
// Allocates workitem-private memory (registers)
@ -63,12 +74,17 @@ void Xconvgemm(const int num_patches, const int num_kernels, const int patch_siz
}
}
// The faster version of GEMM is not allowed on the (incomplete) borders. Therefore, this section
// processes only the main parts: output blocks of WGD by WGD.
// Global m/n indices
const int idm = get_local_id(0) * MWID + GetGroupID0() * WGD;
const int idn = get_local_id(1) * NWID + GetGroupID1() * WGD;
const int w_id = idm % output_w;
const int h_id = idm / output_w;
#if !defined(CONVGEMM_WITH_IM2COL)
const int w_id = idm % output_w;
const int h_id = idm / output_w;
#endif
// The faster version of GEMM is not allowed on the (incomplete) borders. Therefore, this section
// processes only the main parts: output blocks of WGD by WGD.
#if defined(CONVGEMM_WITH_IM2COL) // TEMP: To be implemented for other case as well
if ((idm < (num_patches/WGD)*WGD) && (idn < (num_kernels/WGD)*WGD)) {
// Loops over all complete workgroup tiles (K-dimension)
@ -155,7 +171,9 @@ void Xconvgemm(const int num_patches, const int num_kernels, const int patch_siz
// Simple but slower version for the parts on the edge (incomplete tiles in M and N-dimensions)
else {
#else // TEMP, to be implemented
{ // TEMP, to be implemented
#endif // TEMP, to be implemented
// Loops over all complete workgroup tiles (K-dimension)
int kwg = 0;
for (; kwg < (patch_size/WGD) * WGD; kwg+=WGD) {
@ -207,10 +225,14 @@ void Xconvgemm(const int num_patches, const int num_kernels, const int patch_siz
// Loads data: off-chip --> private
#pragma unroll
for (int _mi = 0; _mi < MWID; _mi += 1) {
#if defined(CONVGEMM_WITH_IM2COL)
apd[_mi] = GlobalToPrivateCheckedA(colgms, _mi, num_patches, col_offset_batch, idm, kwg, false, false, num_patches);
#else
apd[_mi] = GlobalToPrivateCheckedImage(imagegm, image_offset_batch, h_id, w_id, kwg,
input_h, input_w, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w);
#endif
}
#pragma unroll
for (int _ni = 0; _ni < NWID; _ni += 1) {

View File

@ -22,9 +22,11 @@ namespace clblast {
// Constructor: forwards to base class constructor
template <typename T>
Xconvgemm<T>::Xconvgemm(Queue &queue, EventPointer event, const std::string &name):
Xconvgemm<T>::Xconvgemm(Queue &queue, EventPointer event, const std::string &name,
const ConvGemmMethod method):
Routine(queue, event, name, {"XgemmDirect"},
PrecisionValue<T>(), {}, {
(method == ConvGemmMethod::kWithIm2Col) ? "#define CONVGEMM_WITH_IM2COL\n" : "",
#include "../../kernels/level3/level3.opencl"
, // separated in multiple parts to prevent C1091 in MSVC 2013
#include "../../kernels/level3/xgemm_direct_part1.opencl"
@ -33,7 +35,8 @@ Xconvgemm<T>::Xconvgemm(Queue &queue, EventPointer event, const std::string &nam
, // separated in multiple parts to prevent C1091 in MSVC 2013
#include "../../kernels/levelx/xconvgemm_part1.opencl"
#include "../../kernels/levelx/xconvgemm_part2.opencl"
}) {
}),
method_(method) {
}
// =================================================================================================
@ -70,26 +73,29 @@ void Xconvgemm<T>::DoConvgemm(const size_t channels, const size_t height, const
const auto patch_size = kernel_h * kernel_w * channels;
const auto num_patches = output_h * output_w;
// Approach: im2col + GEMM
// Possible approach: im2col + GEMM
// result = GEMM(im2col(image), kernel)
auto col_buffer = Buffer<T>(context_, 0); // nullptr, will be optionally created later
if (method_ == ConvGemmMethod::kWithIm2Col) {
// Temporary col matrix
const auto col_size = patch_size * num_patches * batch_count;
auto col_buffer = Buffer<T>(context_, col_size);
// Temporary col matrix
const auto col_size = (method_ == ConvGemmMethod::kWithIm2Col) ? patch_size * num_patches * batch_count : 1;
col_buffer = Buffer<T>(context_, col_size);
// Loops over each batch
for (auto batch_id = size_t{0}; batch_id < batch_count; ++batch_id) {
// Loops over each batch
for (auto batch_id = size_t{0}; batch_id < batch_count; ++batch_id) {
// im2col
const auto im_batch_offset = batch_id * channels * height * width + im_offset;
const auto col_batch_offset = batch_id * patch_size * num_patches;
auto im2col_event = Event();
auto im2col = Xim2col<T>(queue_, im2col_event.pointer());
im2col.DoIm2col(channels, height, width, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
im_buffer, im_batch_offset,
col_buffer, col_batch_offset);
im2col_event.WaitForCompletion();
// im2col
const auto im_batch_offset = batch_id * channels * height * width + im_offset;
const auto col_batch_offset = batch_id * patch_size * num_patches;
auto im2col_event = Event();
auto im2col = Xim2col<T>(queue_, im2col_event.pointer());
im2col.DoIm2col(channels, height, width, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
im_buffer, im_batch_offset,
col_buffer, col_batch_offset);
im2col_event.WaitForCompletion();
}
}
// Strided batched GEMM: C (result) = alpha (1) * A (col) * B (kernel) + beta (0) * C (result)
@ -99,7 +105,12 @@ void Xconvgemm<T>::DoConvgemm(const size_t channels, const size_t height, const
// Tests the matrices for validity
TestMatrixB(patch_size, num_kernels, kernel_buffer, kernel_offset, patch_size);
for (auto batch = size_t{0}; batch < batch_count; ++batch) {
TestMatrixA(num_patches, patch_size, col_buffer, col_stride * batch, num_patches);
if (method_ == ConvGemmMethod::kWithIm2Col) {
TestMatrixA(num_patches, patch_size, col_buffer, col_stride * batch, num_patches);
}
else {
// TODO: check for valid image tensor
}
TestMatrixC(num_patches, num_kernels, result_buffer, result_offset + result_stride * batch, num_patches);
}
@ -110,29 +121,33 @@ void Xconvgemm<T>::DoConvgemm(const size_t channels, const size_t height, const
kernel.SetArgument(0, static_cast<int>(num_patches));
kernel.SetArgument(1, static_cast<int>(num_kernels));
kernel.SetArgument(2, static_cast<int>(patch_size));
kernel.SetArgument(3, col_buffer());
kernel.SetArgument(4, static_cast<int>(0));
kernel.SetArgument(5, static_cast<int>(col_stride));
kernel.SetArgument(6, kernel_buffer());
kernel.SetArgument(7, static_cast<int>(kernel_offset));
kernel.SetArgument(8, result_buffer());
kernel.SetArgument(9, static_cast<int>(result_offset));
kernel.SetArgument(10, static_cast<int>(result_stride));
kernel.SetArgument(11, static_cast<int>(height));
kernel.SetArgument(12, static_cast<int>(width));
kernel.SetArgument(13, static_cast<int>(channels));
kernel.SetArgument(14, static_cast<int>(kernel_h));
kernel.SetArgument(15, static_cast<int>(kernel_w));
kernel.SetArgument(16, static_cast<int>(pad_h));
kernel.SetArgument(17, static_cast<int>(pad_w));
kernel.SetArgument(18, static_cast<int>(stride_h));
kernel.SetArgument(19, static_cast<int>(stride_w));
kernel.SetArgument(20, static_cast<int>(dilation_h));
kernel.SetArgument(21, static_cast<int>(dilation_w));
kernel.SetArgument(22, im_buffer());
kernel.SetArgument(23, static_cast<int>(im_offset));
kernel.SetArgument(24, static_cast<int>(output_h));
kernel.SetArgument(25, static_cast<int>(output_w));
kernel.SetArgument(3, kernel_buffer());
kernel.SetArgument(4, static_cast<int>(kernel_offset));
kernel.SetArgument(5, result_buffer());
kernel.SetArgument(6, static_cast<int>(result_offset));
kernel.SetArgument(7, static_cast<int>(result_stride));
if (method_ == ConvGemmMethod::kWithIm2Col) {
kernel.SetArgument(8, col_buffer());
kernel.SetArgument(9, static_cast<int>(0));
kernel.SetArgument(10, static_cast<int>(col_stride));
}
if (method_ == ConvGemmMethod::kSingleKernel) {
kernel.SetArgument(8, im_buffer());
kernel.SetArgument(9, static_cast<int>(im_offset));
kernel.SetArgument(10, static_cast<int>(height));
kernel.SetArgument(11, static_cast<int>(width));
kernel.SetArgument(12, static_cast<int>(channels));
kernel.SetArgument(13, static_cast<int>(kernel_h));
kernel.SetArgument(14, static_cast<int>(kernel_w));
kernel.SetArgument(15, static_cast<int>(pad_h));
kernel.SetArgument(16, static_cast<int>(pad_w));
kernel.SetArgument(17, static_cast<int>(stride_h));
kernel.SetArgument(18, static_cast<int>(stride_w));
kernel.SetArgument(19, static_cast<int>(dilation_h));
kernel.SetArgument(20, static_cast<int>(dilation_w));
kernel.SetArgument(21, static_cast<int>(output_h));
kernel.SetArgument(22, static_cast<int>(output_w));
}
// Computes the global and local thread sizes
const auto m_ceiled = Ceil(num_patches, db_["WGD"]);

View File

@ -27,7 +27,9 @@ class Xconvgemm: public Routine {
public:
// Constructor
Xconvgemm(Queue &queue, EventPointer event, const std::string &name = "CONVGEMM");
enum class ConvGemmMethod {kWithIm2Col, kSingleKernel};
Xconvgemm(Queue &queue, EventPointer event, const std::string &name = "CONVGEMM",
const ConvGemmMethod method = ConvGemmMethod::kSingleKernel);
// Templated-precision implementation of the routine
void DoConvgemm(const size_t channels, const size_t height, const size_t width,
@ -39,6 +41,9 @@ class Xconvgemm: public Routine {
const Buffer<T> &im_buffer, const size_t im_offset,
const Buffer<T> &kernel_buffer, const size_t kernel_offset,
const Buffer<T> &result_buffer, const size_t result_offset);
private:
const ConvGemmMethod method_;
};
// =================================================================================================