mirror of
https://github.com/CNugteren/CLBlast.git
synced 2024-08-24 05:52:27 +02:00
Merge branch 'gemm_direct' into development
This commit is contained in:
commit
2194dee217
|
@ -1,8 +1,9 @@
|
||||||
|
|
||||||
Development version (next release)
|
Development version (next release)
|
||||||
- It is now possible to set OpenCL compiler options through the env variable CLBLAST_BUILD_OPTIONS
|
|
||||||
- Fixed a bug in the tests and samples related to waiting for an invalid event
|
|
||||||
- Updated to version 8.0 of the CLCudaAPI C++11 OpenCL header
|
- Updated to version 8.0 of the CLCudaAPI C++11 OpenCL header
|
||||||
|
- Improved performance of GEMM kernels for small sizes by using a direct single-kernel implementation
|
||||||
|
- Fixed a bug in the tests and samples related to waiting for an invalid event
|
||||||
|
- Added an option to set OpenCL compiler options through the env variable CLBLAST_BUILD_OPTIONS
|
||||||
- Added an option to run tuned kernels multiple times to average execution times
|
- Added an option to run tuned kernels multiple times to average execution times
|
||||||
- Various minor fixes and enhancements
|
- Various minor fixes and enhancements
|
||||||
|
|
||||||
|
|
|
@ -134,7 +134,8 @@ endif()
|
||||||
# ==================================================================================================
|
# ==================================================================================================
|
||||||
|
|
||||||
# Sets the supported routines and the used kernels. New routines and kernels should be added here.
|
# Sets the supported routines and the used kernels. New routines and kernels should be added here.
|
||||||
set(KERNELS copy_fast copy_pad transpose_fast transpose_pad xaxpy xdot xger xgemm xgemv)
|
set(KERNELS copy_fast copy_pad transpose_fast transpose_pad xaxpy xdot xger
|
||||||
|
xgemm xgemm_direct xgemv)
|
||||||
set(SAMPLE_PROGRAMS_CPP sgemm)
|
set(SAMPLE_PROGRAMS_CPP sgemm)
|
||||||
set(SAMPLE_PROGRAMS_C sasum dgemv sgemm haxpy cache)
|
set(SAMPLE_PROGRAMS_C sasum dgemv sgemm haxpy cache)
|
||||||
set(LEVEL1_ROUTINES xswap xscal xcopy xaxpy xdot xdotu xdotc xnrm2 xasum xamax)
|
set(LEVEL1_ROUTINES xswap xscal xcopy xaxpy xdot xdotu xdotc xnrm2 xasum xamax)
|
||||||
|
|
|
@ -31,8 +31,12 @@ options("width"=170)
|
||||||
|
|
||||||
# ==================================================================================================
|
# ==================================================================================================
|
||||||
|
|
||||||
# Constants
|
# Settings
|
||||||
num_runs <- 4
|
num_runs <- 5
|
||||||
|
num_runs_short <- 50
|
||||||
|
xtics_subset_threshold <- 100
|
||||||
|
xtics_subset_stepsize <- 8
|
||||||
|
|
||||||
devices <- c("-platform","-device")
|
devices <- c("-platform","-device")
|
||||||
options_string <- "-q -no_abbrv -cblas 0"
|
options_string <- "-q -no_abbrv -cblas 0"
|
||||||
library_names <- c("CLBlast", "clBLAS")
|
library_names <- c("CLBlast", "clBLAS")
|
||||||
|
@ -66,11 +70,21 @@ main <- function(routine_name, precision, test_names, test_values,
|
||||||
executable <- paste("./clblast_client_", routine_name, sep="")
|
executable <- paste("./clblast_client_", routine_name, sep="")
|
||||||
|
|
||||||
# Configures the outputfile
|
# Configures the outputfile
|
||||||
pdf(paste(display_name, ".pdf", sep=""), height=8, width=13)
|
file_name <- paste(display_name, ".pdf", sep="")
|
||||||
|
if (length(test_names) == 6) {
|
||||||
|
pdf(file_name, height=8, width=13)
|
||||||
par(mfrow=c(2, 3))
|
par(mfrow=c(2, 3))
|
||||||
par(oma=c(0, 0, 0, 0))
|
par(oma=c(0, 0, 0, 0))
|
||||||
par(mar=c(4.6, 4.4, 1.5, 0)) # bottom, left, top, right [c(5.1, 4.1, 4.1, 2.1)]
|
par(mar=c(4.6, 4.4, 1.5, 0)) # bottom, left, top, right [c(5.1, 4.1, 4.1, 2.1)]
|
||||||
par(mgp=c(2.8, 0.6, 0)) # location of xlab/ylab, tick-mark labels, tick marks [c(3, 1, 0)]
|
par(mgp=c(2.8, 0.6, 0)) # location of xlab/ylab, tick-mark labels, tick marks [c(3, 1, 0)]
|
||||||
|
}
|
||||||
|
else { # length(test_names) == 2
|
||||||
|
pdf(file_name, height=8, width=13)
|
||||||
|
par(mfrow=c(2, 1))
|
||||||
|
par(oma=c(0, 0, 0, 0))
|
||||||
|
par(mar=c(4.6, 4.4, 1.5, 0)) # bottom, left, top, right [c(5.1, 4.1, 4.1, 2.1)]
|
||||||
|
par(mgp=c(2.8, 0.6, 0)) # location of xlab/ylab, tick-mark labels, tick marks [c(3, 1, 0)]
|
||||||
|
}
|
||||||
|
|
||||||
# Loops over the test-cases
|
# Loops over the test-cases
|
||||||
for (test_id in 1:length(test_names)) {
|
for (test_id in 1:length(test_names)) {
|
||||||
|
@ -169,7 +183,12 @@ plot_graph <- function(xdata, ydata, log_setting,
|
||||||
main="", xlab="", ylab="",
|
main="", xlab="", ylab="",
|
||||||
ylim=c(ymin, ymax), xlim=c(xmin, xmax), axes=F, "n")
|
ylim=c(ymin, ymax), xlim=c(xmin, xmax), axes=F, "n")
|
||||||
axis(side=2, las=2)
|
axis(side=2, las=2)
|
||||||
|
if (length(xdata) > xtics_subset_threshold) { # Too many indices to print, plot only every Nth
|
||||||
|
subset <- seq(from=1, to=length(xdata), by=xtics_subset_stepsize)
|
||||||
|
axis(side=1, at=xdata[subset], labels=xtics[subset], las=2)
|
||||||
|
} else {
|
||||||
axis(side=1, at=xdata, labels=xtics, las=2)
|
axis(side=1, at=xdata, labels=xtics, las=2)
|
||||||
|
}
|
||||||
title(xlab=xlabel, line=-1)
|
title(xlab=xlabel, line=-1)
|
||||||
title(ylab=ylabel, line=2)
|
title(ylab=ylabel, line=2)
|
||||||
title(graph_title, line=-2)
|
title(graph_title, line=-2)
|
||||||
|
|
56
scripts/graphs/xgemm_small.r
Normal file
56
scripts/graphs/xgemm_small.r
Normal file
|
@ -0,0 +1,56 @@
|
||||||
|
|
||||||
|
# ==================================================================================================
|
||||||
|
# This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
|
||||||
|
# project uses a tab-size of two spaces and a max-width of 100 characters per line.
|
||||||
|
#
|
||||||
|
# Author(s):
|
||||||
|
# Cedric Nugteren <www.cedricnugteren.nl>
|
||||||
|
#
|
||||||
|
# This file implements the performance script for small sizes of Xgemm, testing the direct kernel
|
||||||
|
#
|
||||||
|
# ==================================================================================================
|
||||||
|
|
||||||
|
# Includes the common functions
|
||||||
|
args <- commandArgs(trailingOnly = FALSE)
|
||||||
|
thisfile <- (normalizePath(sub("--file=", "", args[grep("--file=", args)])))
|
||||||
|
source(file.path(dirname(thisfile), "common.r"))
|
||||||
|
|
||||||
|
# ==================================================================================================
|
||||||
|
|
||||||
|
# Settings
|
||||||
|
routine_name <- "xgemm"
|
||||||
|
parameters <- c("-m","-n","-k","-layout","-transA","-transB",
|
||||||
|
"-num_steps","-step","-runs","-precision")
|
||||||
|
precision <- 32
|
||||||
|
|
||||||
|
# Sets the names of the test-cases
|
||||||
|
test_names <- list(
|
||||||
|
"small matrices in steps of 16",
|
||||||
|
"small matrices in steps of 1"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Defines the test-cases
|
||||||
|
test_values <- list(
|
||||||
|
list(c( 128, 128, 128, 102, 111, 111, 57, 16, num_runs_short, precision)),
|
||||||
|
list(c( 128, 128, 128, 102, 111, 111, 385, 1, num_runs_short, precision))
|
||||||
|
)
|
||||||
|
|
||||||
|
# Defines the x-labels corresponding to the test-cases
|
||||||
|
test_xlabels <- list(
|
||||||
|
"matrix sizes (m=n=k)",
|
||||||
|
"matrix sizes (m=n=k)"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Defines the x-axis of the test-cases
|
||||||
|
test_xaxis <- list(
|
||||||
|
c("m", ""),
|
||||||
|
c("m", "")
|
||||||
|
)
|
||||||
|
|
||||||
|
# ==================================================================================================
|
||||||
|
|
||||||
|
# Start the script
|
||||||
|
main(routine_name=routine_name, precision=precision, test_names=test_names, test_values=test_values,
|
||||||
|
test_xlabels=test_xlabels, test_xaxis=test_xaxis, metric_gflops=TRUE)
|
||||||
|
|
||||||
|
# ==================================================================================================
|
|
@ -21,10 +21,12 @@
|
||||||
#include "database/kernels/xgemv_fast_rot.hpp"
|
#include "database/kernels/xgemv_fast_rot.hpp"
|
||||||
#include "database/kernels/xger.hpp"
|
#include "database/kernels/xger.hpp"
|
||||||
#include "database/kernels/xgemm.hpp"
|
#include "database/kernels/xgemm.hpp"
|
||||||
|
#include "database/kernels/xgemm_direct.hpp"
|
||||||
#include "database/kernels/copy.hpp"
|
#include "database/kernels/copy.hpp"
|
||||||
#include "database/kernels/pad.hpp"
|
#include "database/kernels/pad.hpp"
|
||||||
#include "database/kernels/transpose.hpp"
|
#include "database/kernels/transpose.hpp"
|
||||||
#include "database/kernels/padtranspose.hpp"
|
#include "database/kernels/padtranspose.hpp"
|
||||||
|
#include "database/kernel_selection.hpp"
|
||||||
|
|
||||||
namespace clblast {
|
namespace clblast {
|
||||||
// =================================================================================================
|
// =================================================================================================
|
||||||
|
@ -38,10 +40,12 @@ const std::vector<Database::DatabaseEntry> Database::database = {
|
||||||
XgemvFastRotHalf, XgemvFastRotSingle, XgemvFastRotDouble, XgemvFastRotComplexSingle, XgemvFastRotComplexDouble,
|
XgemvFastRotHalf, XgemvFastRotSingle, XgemvFastRotDouble, XgemvFastRotComplexSingle, XgemvFastRotComplexDouble,
|
||||||
XgerHalf, XgerSingle, XgerDouble, XgerComplexSingle, XgerComplexDouble,
|
XgerHalf, XgerSingle, XgerDouble, XgerComplexSingle, XgerComplexDouble,
|
||||||
XgemmHalf, XgemmSingle, XgemmDouble, XgemmComplexSingle, XgemmComplexDouble,
|
XgemmHalf, XgemmSingle, XgemmDouble, XgemmComplexSingle, XgemmComplexDouble,
|
||||||
|
XgemmDirectHalf, XgemmDirectSingle, XgemmDirectDouble, XgemmDirectComplexSingle, XgemmDirectComplexDouble,
|
||||||
CopyHalf, CopySingle, CopyDouble, CopyComplexSingle, CopyComplexDouble,
|
CopyHalf, CopySingle, CopyDouble, CopyComplexSingle, CopyComplexDouble,
|
||||||
PadHalf, PadSingle, PadDouble, PadComplexSingle, PadComplexDouble,
|
PadHalf, PadSingle, PadDouble, PadComplexSingle, PadComplexDouble,
|
||||||
TransposeHalf, TransposeSingle, TransposeDouble, TransposeComplexSingle, TransposeComplexDouble,
|
TransposeHalf, TransposeSingle, TransposeDouble, TransposeComplexSingle, TransposeComplexDouble,
|
||||||
PadtransposeHalf, PadtransposeSingle, PadtransposeDouble, PadtransposeComplexSingle, PadtransposeComplexDouble
|
PadtransposeHalf, PadtransposeSingle, PadtransposeDouble, PadtransposeComplexSingle, PadtransposeComplexDouble,
|
||||||
|
KernelSelectionHalf, KernelSelectionSingle, KernelSelectionDouble, KernelSelectionComplexSingle, KernelSelectionComplexDouble
|
||||||
};
|
};
|
||||||
|
|
||||||
// =================================================================================================
|
// =================================================================================================
|
||||||
|
|
|
@ -75,10 +75,12 @@ class Database {
|
||||||
static const DatabaseEntry XgemvFastRotHalf, XgemvFastRotSingle, XgemvFastRotDouble, XgemvFastRotComplexSingle, XgemvFastRotComplexDouble;
|
static const DatabaseEntry XgemvFastRotHalf, XgemvFastRotSingle, XgemvFastRotDouble, XgemvFastRotComplexSingle, XgemvFastRotComplexDouble;
|
||||||
static const DatabaseEntry XgerHalf, XgerSingle, XgerDouble, XgerComplexSingle, XgerComplexDouble;
|
static const DatabaseEntry XgerHalf, XgerSingle, XgerDouble, XgerComplexSingle, XgerComplexDouble;
|
||||||
static const DatabaseEntry XgemmHalf, XgemmSingle, XgemmDouble, XgemmComplexSingle, XgemmComplexDouble;
|
static const DatabaseEntry XgemmHalf, XgemmSingle, XgemmDouble, XgemmComplexSingle, XgemmComplexDouble;
|
||||||
|
static const DatabaseEntry XgemmDirectHalf, XgemmDirectSingle, XgemmDirectDouble, XgemmDirectComplexSingle, XgemmDirectComplexDouble;
|
||||||
static const DatabaseEntry CopyHalf, CopySingle, CopyDouble, CopyComplexSingle, CopyComplexDouble;
|
static const DatabaseEntry CopyHalf, CopySingle, CopyDouble, CopyComplexSingle, CopyComplexDouble;
|
||||||
static const DatabaseEntry PadHalf, PadSingle, PadDouble, PadComplexSingle, PadComplexDouble;
|
static const DatabaseEntry PadHalf, PadSingle, PadDouble, PadComplexSingle, PadComplexDouble;
|
||||||
static const DatabaseEntry TransposeHalf, TransposeSingle, TransposeDouble, TransposeComplexSingle, TransposeComplexDouble;
|
static const DatabaseEntry TransposeHalf, TransposeSingle, TransposeDouble, TransposeComplexSingle, TransposeComplexDouble;
|
||||||
static const DatabaseEntry PadtransposeHalf, PadtransposeSingle, PadtransposeDouble, PadtransposeComplexSingle, PadtransposeComplexDouble;
|
static const DatabaseEntry PadtransposeHalf, PadtransposeSingle, PadtransposeDouble, PadtransposeComplexSingle, PadtransposeComplexDouble;
|
||||||
|
static const DatabaseEntry KernelSelectionHalf, KernelSelectionSingle, KernelSelectionDouble, KernelSelectionComplexSingle, KernelSelectionComplexDouble;
|
||||||
static const std::vector<DatabaseEntry> database;
|
static const std::vector<DatabaseEntry> database;
|
||||||
|
|
||||||
// The constructor with a user-provided database overlay (potentially an empty vector)
|
// The constructor with a user-provided database overlay (potentially an empty vector)
|
||||||
|
|
129
src/database/kernel_selection.hpp
Normal file
129
src/database/kernel_selection.hpp
Normal file
|
@ -0,0 +1,129 @@
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
|
||||||
|
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
|
||||||
|
// width of 100 characters per line.
|
||||||
|
//
|
||||||
|
// Author(s):
|
||||||
|
// Cedric Nugteren <www.cedricnugteren.nl>
|
||||||
|
//
|
||||||
|
// This determines when to switch between the direct (for small sizes) and in-direct GEMM kernel
|
||||||
|
// with pre/post-processing kernels (for larger sizes). These can be set in a similar way as for the
|
||||||
|
// regular kernel tuning parameters: they can be specific for a certain vendor or device or can use
|
||||||
|
// some common default values.
|
||||||
|
//
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
namespace clblast {
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
const Database::DatabaseEntry Database::KernelSelectionHalf = {
|
||||||
|
"KernelSelection", Precision::kHalf, {
|
||||||
|
{ // Intel GPUs
|
||||||
|
kDeviceTypeGPU, "Intel", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",384*384*384} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // NVIDIA GPUs
|
||||||
|
kDeviceTypeGPU, "NVIDIA", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",768*768*768} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // Default
|
||||||
|
kDeviceTypeAll, "default", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",512*512*512} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
const Database::DatabaseEntry Database::KernelSelectionSingle = {
|
||||||
|
"KernelSelection", Precision::kSingle, {
|
||||||
|
{ // Intel GPUs
|
||||||
|
kDeviceTypeGPU, "Intel", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",384*384*384} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // NVIDIA GPUs
|
||||||
|
kDeviceTypeGPU, "NVIDIA", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",768*768*768} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // Default
|
||||||
|
kDeviceTypeAll, "default", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",512*512*512} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
const Database::DatabaseEntry Database::KernelSelectionComplexSingle = {
|
||||||
|
"KernelSelection", Precision::kComplexSingle, {
|
||||||
|
{ // Intel GPUs
|
||||||
|
kDeviceTypeGPU, "Intel", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",384*384*384} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // NVIDIA GPUs
|
||||||
|
kDeviceTypeGPU, "NVIDIA", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",768*768*768} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // Default
|
||||||
|
kDeviceTypeAll, "default", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",512*512*512} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
const Database::DatabaseEntry Database::KernelSelectionDouble = {
|
||||||
|
"KernelSelection", Precision::kDouble, {
|
||||||
|
{ // Intel GPUs
|
||||||
|
kDeviceTypeGPU, "Intel", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",384*384*384} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // NVIDIA GPUs
|
||||||
|
kDeviceTypeGPU, "NVIDIA", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",768*768*768} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // Default
|
||||||
|
kDeviceTypeAll, "default", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",512*512*512} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
const Database::DatabaseEntry Database::KernelSelectionComplexDouble = {
|
||||||
|
"KernelSelection", Precision::kComplexDouble, {
|
||||||
|
{ // Intel GPUs
|
||||||
|
kDeviceTypeGPU, "Intel", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",384*384*384} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // NVIDIA GPUs
|
||||||
|
kDeviceTypeGPU, "NVIDIA", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",768*768*768} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // Default
|
||||||
|
kDeviceTypeAll, "default", {
|
||||||
|
{ "default", { {"XGEMM_MIN_INDIRECT_SIZE",512*512*512} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
} // namespace clblast
|
|
@ -59,8 +59,8 @@ const Database::DatabaseEntry Database::XgemmSingle = {
|
||||||
{ "Intel(R) HD Graphics Haswell Ultrabook GT2 Mobile", { {"KWG",16}, {"KWI",2}, {"MDIMA",16}, {"MDIMC",8}, {"MWG",32}, {"NDIMB",8}, {"NDIMC",16}, {"NWG",128}, {"SA",1}, {"SB",1}, {"STRM",1}, {"STRN",1}, {"VWM",2}, {"VWN",4} } },
|
{ "Intel(R) HD Graphics Haswell Ultrabook GT2 Mobile", { {"KWG",16}, {"KWI",2}, {"MDIMA",16}, {"MDIMC",8}, {"MWG",32}, {"NDIMB",8}, {"NDIMC",16}, {"NWG",128}, {"SA",1}, {"SB",1}, {"STRM",1}, {"STRN",1}, {"VWM",2}, {"VWN",4} } },
|
||||||
{ "Intel(R) HD Graphics Skylake ULT GT2", { {"KWG",32}, {"KWI",8}, {"MDIMA",16}, {"MDIMC",16}, {"MWG",64}, {"NDIMB",16}, {"NDIMC",16}, {"NWG",128}, {"SA",0}, {"SB",0}, {"STRM",0}, {"STRN",1}, {"VWM",1}, {"VWN",8} } },
|
{ "Intel(R) HD Graphics Skylake ULT GT2", { {"KWG",32}, {"KWI",8}, {"MDIMA",16}, {"MDIMC",16}, {"MWG",64}, {"NDIMB",16}, {"NDIMC",16}, {"NWG",128}, {"SA",0}, {"SB",0}, {"STRM",0}, {"STRN",1}, {"VWM",1}, {"VWN",8} } },
|
||||||
{ "Iris", { {"KWG",16}, {"KWI",8}, {"MDIMA",16}, {"MDIMC",8}, {"MWG",128}, {"NDIMB",32}, {"NDIMC",16}, {"NWG",64}, {"SA",1}, {"SB",1}, {"STRM",1}, {"STRN",1}, {"VWM",4}, {"VWN",1} } },
|
{ "Iris", { {"KWG",16}, {"KWI",8}, {"MDIMA",16}, {"MDIMC",8}, {"MWG",128}, {"NDIMB",32}, {"NDIMC",16}, {"NWG",64}, {"SA",1}, {"SB",1}, {"STRM",1}, {"STRN",1}, {"VWM",4}, {"VWN",1} } },
|
||||||
{ "Iris Pro", { {"KWG",32}, {"KWI",8}, {"MDIMA",16}, {"MDIMC",8}, {"MWG",64}, {"NDIMB",8}, {"NDIMC",8}, {"NWG",64}, {"SA",1}, {"SB",0}, {"STRM",1}, {"STRN",0}, {"VWM",4}, {"VWN",4} } },
|
{ "Iris Pro", { {"KWG",16}, {"KWI",2}, {"MDIMA",16}, {"MDIMC",8}, {"MWG",64}, {"NDIMB",32}, {"NDIMC",32}, {"NWG",128}, {"SA",1}, {"SB",1}, {"STRM",1}, {"STRN",0}, {"VWM",4}, {"VWN",4} } },
|
||||||
{ "default", { {"KWG",16}, {"KWI",2}, {"MDIMA",8}, {"MDIMC",8}, {"MWG",32}, {"NDIMB",8}, {"NDIMC",8}, {"NWG",64}, {"SA",0}, {"SB",0}, {"STRM",0}, {"STRN",0}, {"VWM",1}, {"VWN",1} } },
|
{ "default", { {"KWG",16}, {"KWI",2}, {"MDIMA",8}, {"MDIMC",8}, {"MWG",32}, {"NDIMB",8}, {"NDIMC",16}, {"NWG",64}, {"SA",0}, {"SB",0}, {"STRM",0}, {"STRN",0}, {"VWM",1}, {"VWN",1} } },
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{ // Intel accelerators
|
{ // Intel accelerators
|
||||||
|
|
136
src/database/kernels/xgemm_direct.hpp
Normal file
136
src/database/kernels/xgemm_direct.hpp
Normal file
|
@ -0,0 +1,136 @@
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
|
||||||
|
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
|
||||||
|
// width of 100 characters per line.
|
||||||
|
//
|
||||||
|
// Author(s):
|
||||||
|
// Database generator <database.py>
|
||||||
|
//
|
||||||
|
// This file populates the database with best-found tuning parameters for the 'Xgemm_Direct' kernels.
|
||||||
|
//
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
namespace clblast {
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
const Database::DatabaseEntry Database::XgemmDirectHalf = {
|
||||||
|
"XgemmDirect", Precision::kHalf, {
|
||||||
|
{ // Default
|
||||||
|
kDeviceTypeAll, "default", {
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",8}, {"MDIMCD",8}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",4}, {"VWND",4}, {"WGD",32} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
const Database::DatabaseEntry Database::XgemmDirectSingle = {
|
||||||
|
"XgemmDirect", Precision::kSingle, {
|
||||||
|
{ // AMD GPUs
|
||||||
|
kDeviceTypeGPU, "AMD", {
|
||||||
|
{ "AMD Radeon R9 M370X Compute Engine", { {"KWID",2}, {"MDIMAD",8}, {"MDIMCD",8}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",2}, {"VWND",2}, {"WGD",32} } },
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",8}, {"MDIMCD",8}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",2}, {"VWND",2}, {"WGD",32} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // Intel GPUs
|
||||||
|
kDeviceTypeGPU, "Intel", {
|
||||||
|
{ "Iris Pro", { {"KWID",2}, {"MDIMAD",16}, {"MDIMCD",16}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",2}, {"VWND",4}, {"WGD",32} } },
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",16}, {"MDIMCD",16}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",2}, {"VWND",4}, {"WGD",32} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // NVIDIA GPUs
|
||||||
|
kDeviceTypeGPU, "NVIDIA", {
|
||||||
|
{ "GeForce GTX 750 Ti", { {"KWID",2}, {"MDIMAD",8}, {"MDIMCD",8}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",4}, {"VWND",4}, {"WGD",32} } },
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",8}, {"MDIMCD",8}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",4}, {"VWND",4}, {"WGD",32} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // Default
|
||||||
|
kDeviceTypeAll, "default", {
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",8}, {"MDIMCD",8}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",4}, {"VWND",4}, {"WGD",32} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
const Database::DatabaseEntry Database::XgemmDirectComplexSingle = {
|
||||||
|
"XgemmDirect", Precision::kComplexSingle, {
|
||||||
|
{ // AMD GPUs
|
||||||
|
kDeviceTypeGPU, "AMD", {
|
||||||
|
{ "AMD Radeon R9 M370X Compute Engine", { {"KWID",2}, {"MDIMAD",16}, {"MDIMCD",16}, {"NDIMBD",16}, {"NDIMCD",16}, {"PADA",1}, {"PADB",1}, {"VWMD",1}, {"VWND",1}, {"WGD",16} } },
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",16}, {"MDIMCD",16}, {"NDIMBD",16}, {"NDIMCD",16}, {"PADA",1}, {"PADB",1}, {"VWMD",1}, {"VWND",1}, {"WGD",16} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // Intel GPUs
|
||||||
|
kDeviceTypeGPU, "Intel", {
|
||||||
|
{ "Iris Pro", { {"KWID",2}, {"MDIMAD",16}, {"MDIMCD",16}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",2}, {"VWND",2}, {"WGD",32} } },
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",16}, {"MDIMCD",16}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",2}, {"VWND",2}, {"WGD",32} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // NVIDIA GPUs
|
||||||
|
kDeviceTypeGPU, "NVIDIA", {
|
||||||
|
{ "GeForce GTX 750 Ti", { {"KWID",2}, {"MDIMAD",8}, {"MDIMCD",8}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",1}, {"VWND",2}, {"WGD",16} } },
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",8}, {"MDIMCD",8}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",1}, {"VWND",2}, {"WGD",16} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // Default
|
||||||
|
kDeviceTypeAll, "default", {
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",16}, {"MDIMCD",16}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",2}, {"VWND",2}, {"WGD",32} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
const Database::DatabaseEntry Database::XgemmDirectDouble = {
|
||||||
|
"XgemmDirect", Precision::kDouble, {
|
||||||
|
{ // AMD GPUs
|
||||||
|
kDeviceTypeGPU, "AMD", {
|
||||||
|
{ "AMD Radeon R9 M370X Compute Engine", { {"KWID",2}, {"MDIMAD",16}, {"MDIMCD",16}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",1}, {"VWND",1}, {"WGD",16} } },
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",16}, {"MDIMCD",16}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",1}, {"VWND",1}, {"WGD",16} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // NVIDIA GPUs
|
||||||
|
kDeviceTypeGPU, "NVIDIA", {
|
||||||
|
{ "GeForce GTX 750 Ti", { {"KWID",2}, {"MDIMAD",8}, {"MDIMCD",8}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",2}, {"VWND",2}, {"WGD",32} } },
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",8}, {"MDIMCD",8}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",2}, {"VWND",2}, {"WGD",32} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // Default
|
||||||
|
kDeviceTypeAll, "default", {
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",16}, {"MDIMCD",16}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",1}, {"VWND",1}, {"WGD",16} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
const Database::DatabaseEntry Database::XgemmDirectComplexDouble = {
|
||||||
|
"XgemmDirect", Precision::kComplexDouble, {
|
||||||
|
{ // AMD GPUs
|
||||||
|
kDeviceTypeGPU, "AMD", {
|
||||||
|
{ "AMD Radeon R9 M370X Compute Engine", { {"KWID",2}, {"MDIMAD",16}, {"MDIMCD",16}, {"NDIMBD",16}, {"NDIMCD",16}, {"PADA",1}, {"PADB",1}, {"VWMD",1}, {"VWND",1}, {"WGD",16} } },
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",16}, {"MDIMCD",16}, {"NDIMBD",16}, {"NDIMCD",16}, {"PADA",1}, {"PADB",1}, {"VWMD",1}, {"VWND",1}, {"WGD",16} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // NVIDIA GPUs
|
||||||
|
kDeviceTypeGPU, "NVIDIA", {
|
||||||
|
{ "GeForce GTX 750 Ti", { {"KWID",2}, {"MDIMAD",32}, {"MDIMCD",32}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",1}, {"VWND",1}, {"WGD",32} } },
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",32}, {"MDIMCD",32}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",1}, {"VWND",1}, {"WGD",32} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ // Default
|
||||||
|
kDeviceTypeAll, "default", {
|
||||||
|
{ "default", { {"KWID",2}, {"MDIMAD",8}, {"MDIMCD",8}, {"NDIMBD",8}, {"NDIMCD",8}, {"PADA",1}, {"PADB",1}, {"VWMD",1}, {"VWND",2}, {"WGD",16} } },
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
} // namespace clblast
|
|
@ -204,7 +204,7 @@ R"(
|
||||||
#if PRECISION == 3232 || PRECISION == 6464
|
#if PRECISION == 3232 || PRECISION == 6464
|
||||||
#define COMPLEX_CONJUGATE(value) value.x = value.x; value.y = -value.y
|
#define COMPLEX_CONJUGATE(value) value.x = value.x; value.y = -value.y
|
||||||
#else
|
#else
|
||||||
#define COMPLEX_CONJUGATE(value) value = value
|
#define COMPLEX_CONJUGATE(value)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
// =================================================================================================
|
// =================================================================================================
|
||||||
|
|
273
src/kernels/level3/xgemm_direct_part1.opencl
Normal file
273
src/kernels/level3/xgemm_direct_part1.opencl
Normal file
|
@ -0,0 +1,273 @@
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
|
||||||
|
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
|
||||||
|
// width of 100 characters per line.
|
||||||
|
//
|
||||||
|
// Author(s):
|
||||||
|
// Cedric Nugteren <www.cedricnugteren.nl>
|
||||||
|
//
|
||||||
|
// This is a generic GEMM kernel that works for all sizes and configurations: it doesn't require any
|
||||||
|
// pre and and post-processing kernels.
|
||||||
|
//
|
||||||
|
// This kernel is seperated into three files. This is part 1 out of 3.
|
||||||
|
//
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string
|
||||||
|
// literal). Comment-out this line for syntax-highlighting when developing.
|
||||||
|
R"(
|
||||||
|
|
||||||
|
// Parameters set by the tuner or by the database. Here they are given a basic default value in case
|
||||||
|
// this kernel file is used outside of the CLBlast library. Note that all parameters here have a
|
||||||
|
// suffix 'D' to denote that they are for the 'direct' version of the GEMM kernel.
|
||||||
|
#ifndef WGD
|
||||||
|
#define WGD 8 // Tile-size in dimension M, N, and K (e.g. 8, 16, 32, 64)
|
||||||
|
#endif
|
||||||
|
#ifndef MDIMCD
|
||||||
|
#define MDIMCD 8 // Threads per workgroup in M-dimension (e.g. 8, 16, 32)
|
||||||
|
#endif
|
||||||
|
#ifndef NDIMCD
|
||||||
|
#define NDIMCD 8 // Threads per workgroup in N-dimension (e.g. 8, 16, 32)
|
||||||
|
#endif
|
||||||
|
#ifndef MDIMAD
|
||||||
|
#define MDIMAD 8 // Re-shaped tile dimension of matrix A: KDIMAD * MDIMAD
|
||||||
|
#endif
|
||||||
|
#ifndef NDIMBD
|
||||||
|
#define NDIMBD 8 // Re-shaped tile dimension of matrix B: KDIMBD * NDIMBD
|
||||||
|
#endif
|
||||||
|
#ifndef KWID
|
||||||
|
#define KWID 1 // Unroll factor of the WGD loop (smaller or equal than WGD)
|
||||||
|
#endif
|
||||||
|
#ifndef VWMD
|
||||||
|
#define VWMD 1 // Vector width of matrices A and C
|
||||||
|
#endif
|
||||||
|
#ifndef VWND
|
||||||
|
#define VWND 1 // Vector width of matrix B
|
||||||
|
#endif
|
||||||
|
#ifndef PADA
|
||||||
|
#define PADA 1 // Local memory padding for matrix A
|
||||||
|
#endif
|
||||||
|
#ifndef PADB
|
||||||
|
#define PADB 1 // Local memory padding for matrix B
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Helper parameters based on the above tuning parameters
|
||||||
|
#define MWID (WGD/MDIMCD) // Work per work-item (M-dimension)
|
||||||
|
#define NWID (WGD/NDIMCD) // Work per work-item (N-dimension)
|
||||||
|
#define KDIMAD ((MDIMCD*NDIMCD)/(MDIMAD)) // Re-shaped tile dimension of matrix A: KDIMAD * MDIMAD
|
||||||
|
#define KDIMBD ((MDIMCD*NDIMCD)/(NDIMBD)) // Re-shaped tile dimension of matrix B: KDIMBD * NDIMBD
|
||||||
|
#define MWAD (WGD/MDIMAD) // Amount of loads-per-thread for matrix A (M-dimension)
|
||||||
|
#define KWAD (WGD/KDIMAD) // Amount of loads-per-thread for matrix A (K-dimension)
|
||||||
|
#define KWBD (WGD/KDIMBD) // Amount of loads-per-thread for matrix B (K-dimension)
|
||||||
|
#define NWBD (WGD/NDIMBD) // Amount of loads-per-thread for matrix B (N-dimension)
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// Data-widths in dimension M
|
||||||
|
#if VWMD == 1
|
||||||
|
typedef real realMD;
|
||||||
|
#elif VWMD == 2
|
||||||
|
typedef real2 realMD;
|
||||||
|
#elif VWMD == 4
|
||||||
|
typedef real4 realMD;
|
||||||
|
#elif VWMD == 8
|
||||||
|
typedef real8 realMD;
|
||||||
|
#elif VWMD == 16
|
||||||
|
typedef real16 realMD;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Data-widths in dimension N
|
||||||
|
#if VWND == 1
|
||||||
|
typedef real realND;
|
||||||
|
#elif VWND == 2
|
||||||
|
typedef real2 realND;
|
||||||
|
#elif VWND == 4
|
||||||
|
typedef real4 realND;
|
||||||
|
#elif VWND == 8
|
||||||
|
typedef real8 realND;
|
||||||
|
#elif VWND == 16
|
||||||
|
typedef real16 realND;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// Initializes the accumulation registers to zero
|
||||||
|
inline void InitAccRegistersDirect(real cpm[NWID][MWID]) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int mi=0; mi<MWID; ++mi) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int ni=0; ni<NWID; ++ni) {
|
||||||
|
SetToZero(cpm[ni][mi]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// Performs the actual computation: Cpm += Apm * Bpm
|
||||||
|
inline void MultiplyAccumulateDirect(real cpm[NWID][MWID], real apm[MWID], real bpm[NWID]) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int ni=0; ni<NWID; ++ni) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int mi=0; mi<MWID; ++mi) {
|
||||||
|
MultiplyAdd(cpm[ni][mi], apm[mi], bpm[ni]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// Loads global off-chip memory into thread-private register files. This function is specific for
|
||||||
|
// loading the A input matrix.
|
||||||
|
inline void GlobalToPrivateDirectA(const __global real* restrict agms, real apm[MWID],
|
||||||
|
const int a_ld, const int a_offset, const int idm, const int idk,
|
||||||
|
const int a_transpose, const int a_conjugate) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int mi=0; mi<MWID; ++mi) {
|
||||||
|
const int a_index = (a_transpose) ? (idm + mi)*a_ld + idk : idk*a_ld + (idm + mi);
|
||||||
|
apm[mi] = agms[a_index + a_offset];
|
||||||
|
if (a_conjugate) { COMPLEX_CONJUGATE(apm[mi]); }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Same as above, but now for the B input matrix
|
||||||
|
inline void GlobalToPrivateDirectB(const __global real* restrict bgms, real bpm[NWID],
|
||||||
|
const int b_ld, const int b_offset, const int idn, const int idk,
|
||||||
|
const int b_transpose, const int b_conjugate) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int ni=0; ni<NWID; ++ni) {
|
||||||
|
const int b_index = (b_transpose) ? (idn + ni)*b_ld + idk : idk*b_ld + (idn + ni);
|
||||||
|
bpm[ni] = bgms[b_index + b_offset];
|
||||||
|
if (b_conjugate) { COMPLEX_CONJUGATE(bpm[ni]); }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Loads global off-chip memory into thread-private register files. This function is specific for
|
||||||
|
// loading the A input matrix. This is the same as above but now includes a bounds check.
|
||||||
|
inline void GlobalToPrivateCheckedA(const __global real* restrict agms, real apm[MWID],
|
||||||
|
const int a_ld, const int a_offset, const int idm, const int idk,
|
||||||
|
const int a_transpose, const int a_conjugate,
|
||||||
|
const int kSizeM) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int mi=0; mi<MWID; ++mi) {
|
||||||
|
if (idm + mi < kSizeM) {
|
||||||
|
const int a_index = (a_transpose) ? (idm + mi)*a_ld + idk : idk*a_ld + (idm + mi);
|
||||||
|
apm[mi] = agms[a_index + a_offset];
|
||||||
|
if (a_conjugate) { COMPLEX_CONJUGATE(apm[mi]); }
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
SetToZero(apm[mi]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Same as above, but now for the B input matrix
|
||||||
|
inline void GlobalToPrivateCheckedB(const __global real* restrict bgms, real bpm[NWID],
|
||||||
|
const int b_ld, const int b_offset, const int idn, const int idk,
|
||||||
|
const int b_transpose, const int b_conjugate,
|
||||||
|
const int kSizeN) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int ni=0; ni<NWID; ++ni) {
|
||||||
|
if (idn + ni < kSizeN) {
|
||||||
|
const int b_index = (b_transpose) ? (idn + ni)*b_ld + idk : idk*b_ld + (idn + ni);
|
||||||
|
bpm[ni] = bgms[b_index + b_offset];
|
||||||
|
if (b_conjugate) { COMPLEX_CONJUGATE(bpm[ni]); }
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
SetToZero(bpm[ni]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// Caches on-chip local memory into per-thread private memory (registers). This function is specific
|
||||||
|
// for caching the A input matrix.
|
||||||
|
inline void LocalToPrivateDirectA(__local real* alm, real apm[MWID], const int kg,
|
||||||
|
const int a_transpose) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int mi=0; mi<MWID; ++mi) {
|
||||||
|
const int mg = mi + get_local_id(0)*MWID;
|
||||||
|
const int index = (a_transpose) ? mg*(WGD + PADA) + kg : kg*(WGD + PADA) + mg;
|
||||||
|
apm[mi] = alm[index];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Same as above, but now for the B input matrix
|
||||||
|
inline void LocalToPrivateDirectB(__local real* blm, real bpm[NWID], const int kg,
|
||||||
|
const int b_transpose) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int ni=0; ni<NWID; ++ni) {
|
||||||
|
const int ng = ni + get_local_id(1)*NWID;
|
||||||
|
const int index = (b_transpose) ? ng*(WGD + PADB) + kg : kg*(WGD + PADB) + ng;
|
||||||
|
bpm[ni] = blm[index];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// Merges the results in Cpm with the global array in Cgm. This also performs the multiplication
|
||||||
|
// with the constants: Cgm = alpha*A*B + beta*Cgm = alpha*Cpm + beta*Cgm
|
||||||
|
inline void StoreResultsDirect(__global real* cgm, real cpm[NWID][MWID],
|
||||||
|
const int idm, const int idn,
|
||||||
|
const real alpha, const real beta,
|
||||||
|
const int c_ld, const int c_offset, const int c_transpose) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int ni=0; ni<NWID; ++ni) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int mi=0; mi<MWID; ++mi) {
|
||||||
|
|
||||||
|
// Determines the destination index
|
||||||
|
int c_index = (c_transpose) ? (idm + mi)*c_ld + (idn + ni) : (idn + ni)*c_ld + (idm + mi);
|
||||||
|
|
||||||
|
// The final multiplication with alpha (in case beta == 0)
|
||||||
|
real result;
|
||||||
|
if (IsZero(beta)) {
|
||||||
|
Multiply(result, alpha, cpm[ni][mi]);
|
||||||
|
}
|
||||||
|
// The final multiplication with alpha and the addition with beta*C
|
||||||
|
else {
|
||||||
|
AXPBY(result, alpha, cpm[ni][mi], beta, cgm[c_index + c_offset]);
|
||||||
|
}
|
||||||
|
cgm[c_index + c_offset] = result;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merges the results in Cpm with the global array in Cgm. This also performs the multiplication
|
||||||
|
// with the constants: Cgm = alpha*A*B + beta*Cgm = alpha*Cpm + beta*Cgm
|
||||||
|
inline void StoreResultsChecked(__global real* cgm, real cpm[NWID][MWID],
|
||||||
|
const int idm, const int idn, const int kSizeM, const int kSizeN,
|
||||||
|
const real alpha, const real beta,
|
||||||
|
const int c_ld, const int c_offset, const int c_transpose) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int ni=0; ni<NWID; ++ni) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int mi=0; mi<MWID; ++mi) {
|
||||||
|
if ((idm + mi) < kSizeM && (idn + ni) < kSizeN) {
|
||||||
|
|
||||||
|
// Determines the destination index
|
||||||
|
int c_index = (c_transpose) ? (idm + mi)*c_ld + (idn + ni) : (idn + ni)*c_ld + (idm + mi);
|
||||||
|
|
||||||
|
// The final multiplication with alpha (in case beta == 0)
|
||||||
|
real result;
|
||||||
|
if (IsZero(beta)) {
|
||||||
|
Multiply(result, alpha, cpm[ni][mi]);
|
||||||
|
}
|
||||||
|
// The final multiplication with alpha and the addition with beta*C
|
||||||
|
else {
|
||||||
|
AXPBY(result, alpha, cpm[ni][mi], beta, cgm[c_index + c_offset]);
|
||||||
|
}
|
||||||
|
cgm[c_index + c_offset] = result;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// End of the C++11 raw string literal
|
||||||
|
)"
|
||||||
|
|
||||||
|
// =================================================================================================
|
314
src/kernels/level3/xgemm_direct_part2.opencl
Normal file
314
src/kernels/level3/xgemm_direct_part2.opencl
Normal file
|
@ -0,0 +1,314 @@
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
|
||||||
|
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
|
||||||
|
// width of 100 characters per line.
|
||||||
|
//
|
||||||
|
// Author(s):
|
||||||
|
// Cedric Nugteren <www.cedricnugteren.nl>
|
||||||
|
//
|
||||||
|
// This is part 2 of 3 of the GEMM kernel. See part 1 for more information.
|
||||||
|
//
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string
|
||||||
|
// literal). Comment-out this line for syntax-highlighting when developing.
|
||||||
|
R"(
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
// caching the A input matrix. It uses vector loads of width VWMD; the caller guarantees that a_ld
// is a multiple of VWMD. The local tile is stored with a row padding of PADA elements
// (presumably to reduce local-memory bank conflicts — see the PADA parameter description).
inline void GlobalToLocalDirectA(const __global realMD* restrict agm, __local real* alm,
                                 const int a_ld, const int a_offset, const int kwg,
                                 const int a_transpose, const int a_conjugate) {
  // Re-shapes the MDIMCD x NDIMCD thread layout into the MDIMAD x KDIMAD loading layout. When
  // the first dimensions match, the local IDs can be used directly; otherwise they are derived
  // from a flattened thread ID.
  #if MDIMCD == MDIMAD
    const int la0 = get_local_id(0);
    const int la1 = get_local_id(1);
  #else
    const int tid = get_local_id(0) + MDIMCD*get_local_id(1);
    const int la0 = tid % MDIMAD;
    const int la1 = tid / MDIMAD;
  #endif
  #pragma unroll
  for (int mia=0; mia<MWAD/VWMD; ++mia) {
    #pragma unroll
    for (int kia=0; kia<KWAD; ++kia) {

      // Computes the indices for the global memory (in units of VWMD-wide vectors for the
      // M-dimension, scalars for the K-dimension)
      int mg = mia + la0*(MWAD/VWMD);
      int kg = kia + la1*KWAD;
      int idm = (a_transpose) ? mg + kwg/VWMD : mg + GetGroupID0()*(WGD/VWMD);
      int idk = (a_transpose) ? kg + GetGroupID0()*WGD : kg + kwg;

      // Loads the data from global memory into the local memory: one vector load, then a
      // component-wise scatter into the padded local tile (one case per vector width)
      const realMD avec = agm[idk*(a_ld/VWMD) + idm + a_offset];
      #if VWMD == 1
         alm[kg*(WGD + PADA) + mg] = avec;
      #elif VWMD == 2
         alm[kg*(WGD + PADA) + mg*VWMD + 0] = avec.x;
         alm[kg*(WGD + PADA) + mg*VWMD + 1] = avec.y;
      #elif VWMD == 4
         alm[kg*(WGD + PADA) + mg*VWMD + 0] = avec.x;
         alm[kg*(WGD + PADA) + mg*VWMD + 1] = avec.y;
         alm[kg*(WGD + PADA) + mg*VWMD + 2] = avec.z;
         alm[kg*(WGD + PADA) + mg*VWMD + 3] = avec.w;
      #elif VWMD == 8
         alm[kg*(WGD + PADA) + mg*VWMD + 0] = avec.s0;
         alm[kg*(WGD + PADA) + mg*VWMD + 1] = avec.s1;
         alm[kg*(WGD + PADA) + mg*VWMD + 2] = avec.s2;
         alm[kg*(WGD + PADA) + mg*VWMD + 3] = avec.s3;
         alm[kg*(WGD + PADA) + mg*VWMD + 4] = avec.s4;
         alm[kg*(WGD + PADA) + mg*VWMD + 5] = avec.s5;
         alm[kg*(WGD + PADA) + mg*VWMD + 6] = avec.s6;
         alm[kg*(WGD + PADA) + mg*VWMD + 7] = avec.s7;
      #elif VWMD == 16
         alm[kg*(WGD + PADA) + mg*VWMD + 0] = avec.s0;
         alm[kg*(WGD + PADA) + mg*VWMD + 1] = avec.s1;
         alm[kg*(WGD + PADA) + mg*VWMD + 2] = avec.s2;
         alm[kg*(WGD + PADA) + mg*VWMD + 3] = avec.s3;
         alm[kg*(WGD + PADA) + mg*VWMD + 4] = avec.s4;
         alm[kg*(WGD + PADA) + mg*VWMD + 5] = avec.s5;
         alm[kg*(WGD + PADA) + mg*VWMD + 6] = avec.s6;
         alm[kg*(WGD + PADA) + mg*VWMD + 7] = avec.s7;
         alm[kg*(WGD + PADA) + mg*VWMD + 8] = avec.s8;
         alm[kg*(WGD + PADA) + mg*VWMD + 9] = avec.s9;
         alm[kg*(WGD + PADA) + mg*VWMD + 10] = avec.sA;
         alm[kg*(WGD + PADA) + mg*VWMD + 11] = avec.sB;
         alm[kg*(WGD + PADA) + mg*VWMD + 12] = avec.sC;
         alm[kg*(WGD + PADA) + mg*VWMD + 13] = avec.sD;
         alm[kg*(WGD + PADA) + mg*VWMD + 14] = avec.sE;
         alm[kg*(WGD + PADA) + mg*VWMD + 15] = avec.sF;
      #endif
      // Conjugates the freshly stored elements if requested (meaningful for complex data only)
      if (a_conjugate) {
        for (int vm=0; vm<VWMD; ++vm) {
          COMPLEX_CONJUGATE(alm[kg*(WGD + PADA) + mg*VWMD + vm]);
        }
      }
    }
  }
}
|
||||||
|
|
||||||
|
// Same as above, but now for the B input matrix: vector loads of width VWND, local tile padded
// by PADB elements per row. The caller guarantees that b_ld is a multiple of VWND.
inline void GlobalToLocalDirectB(const __global realND* restrict bgm, __local real* blm,
                                 const int b_ld, const int b_offset, const int kwg,
                                 const int b_transpose, const int b_conjugate) {
  // Re-shapes the MDIMCD x NDIMCD thread layout into the NDIMBD x KDIMBD loading layout
  #if MDIMCD == NDIMBD
    const int lb0 = get_local_id(0);
    const int lb1 = get_local_id(1);
  #else
    const int tid = get_local_id(0) + MDIMCD*get_local_id(1);
    const int lb0 = tid % NDIMBD;
    const int lb1 = tid / NDIMBD;
  #endif
  #pragma unroll
  for (int kib=0; kib<KWBD; ++kib) {
    #pragma unroll
    for (int nib=0; nib<NWBD/VWND; ++nib) {

      // Computes the indices for the global memory (vector units in the N-dimension)
      int ng = nib + lb0*(NWBD/VWND);
      int kg = kib + lb1*KWBD;
      int idn = (b_transpose) ? ng + kwg/VWND : ng + GetGroupID1()*(WGD/VWND);
      int idk = (b_transpose) ? kg + GetGroupID1()*WGD : kg + kwg;

      // Loads the data from global memory into the local memory: one vector load, then a
      // component-wise scatter into the padded local tile (one case per vector width)
      const realND bvec = bgm[idk*(b_ld/VWND) + idn + b_offset];
      #if VWND == 1
         blm[kg*(WGD + PADB) + ng] = bvec;
      #elif VWND == 2
         blm[kg*(WGD + PADB) + ng*VWND + 0] = bvec.x;
         blm[kg*(WGD + PADB) + ng*VWND + 1] = bvec.y;
      #elif VWND == 4
         blm[kg*(WGD + PADB) + ng*VWND + 0] = bvec.x;
         blm[kg*(WGD + PADB) + ng*VWND + 1] = bvec.y;
         blm[kg*(WGD + PADB) + ng*VWND + 2] = bvec.z;
         blm[kg*(WGD + PADB) + ng*VWND + 3] = bvec.w;
      #elif VWND == 8
         blm[kg*(WGD + PADB) + ng*VWND + 0] = bvec.s0;
         blm[kg*(WGD + PADB) + ng*VWND + 1] = bvec.s1;
         blm[kg*(WGD + PADB) + ng*VWND + 2] = bvec.s2;
         blm[kg*(WGD + PADB) + ng*VWND + 3] = bvec.s3;
         blm[kg*(WGD + PADB) + ng*VWND + 4] = bvec.s4;
         blm[kg*(WGD + PADB) + ng*VWND + 5] = bvec.s5;
         blm[kg*(WGD + PADB) + ng*VWND + 6] = bvec.s6;
         blm[kg*(WGD + PADB) + ng*VWND + 7] = bvec.s7;
      #elif VWND == 16
         blm[kg*(WGD + PADB) + ng*VWND + 0] = bvec.s0;
         blm[kg*(WGD + PADB) + ng*VWND + 1] = bvec.s1;
         blm[kg*(WGD + PADB) + ng*VWND + 2] = bvec.s2;
         blm[kg*(WGD + PADB) + ng*VWND + 3] = bvec.s3;
         blm[kg*(WGD + PADB) + ng*VWND + 4] = bvec.s4;
         blm[kg*(WGD + PADB) + ng*VWND + 5] = bvec.s5;
         blm[kg*(WGD + PADB) + ng*VWND + 6] = bvec.s6;
         blm[kg*(WGD + PADB) + ng*VWND + 7] = bvec.s7;
         blm[kg*(WGD + PADB) + ng*VWND + 8] = bvec.s8;
         blm[kg*(WGD + PADB) + ng*VWND + 9] = bvec.s9;
         blm[kg*(WGD + PADB) + ng*VWND + 10] = bvec.sA;
         blm[kg*(WGD + PADB) + ng*VWND + 11] = bvec.sB;
         blm[kg*(WGD + PADB) + ng*VWND + 12] = bvec.sC;
         blm[kg*(WGD + PADB) + ng*VWND + 13] = bvec.sD;
         blm[kg*(WGD + PADB) + ng*VWND + 14] = bvec.sE;
         blm[kg*(WGD + PADB) + ng*VWND + 15] = bvec.sF;
      #endif
      // Conjugates the freshly stored elements if requested (meaningful for complex data only)
      if (b_conjugate) {
        for (int vn=0; vn<VWND; ++vn) {
          COMPLEX_CONJUGATE(blm[kg*(WGD + PADB) + ng*VWND + vn]);
        }
      }
    }
  }
}
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
|
||||||
|
// caching the A input matrix. In contrast to the functions above, this function performs doesn't
|
||||||
|
// use the vector data-types.
|
||||||
|
inline void GlobalToLocalScalarA(const __global real* restrict agms, __local real* alm,
|
||||||
|
const int a_ld, const int a_offset, const int kwg,
|
||||||
|
const int a_transpose, const int a_conjugate) {
|
||||||
|
#if MDIMCD == MDIMAD
|
||||||
|
const int la0 = get_local_id(0);
|
||||||
|
const int la1 = get_local_id(1);
|
||||||
|
#else
|
||||||
|
const int tid = get_local_id(0) + MDIMCD*get_local_id(1);
|
||||||
|
const int la0 = tid % MDIMAD;
|
||||||
|
const int la1 = tid / MDIMAD;
|
||||||
|
#endif
|
||||||
|
#pragma unroll
|
||||||
|
for (int mia=0; mia<MWAD; ++mia) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int kia=0; kia<KWAD; ++kia) {
|
||||||
|
|
||||||
|
// Computes the indices for the global memory
|
||||||
|
int mg = mia + la0*MWAD;
|
||||||
|
int kg = kia + la1*KWAD;
|
||||||
|
int idm = (a_transpose) ? mg + kwg : mg + GetGroupID0()*WGD;
|
||||||
|
int idk = (a_transpose) ? kg + GetGroupID0()*WGD : kg + kwg;
|
||||||
|
|
||||||
|
// Loads the data from global memory into the local memory
|
||||||
|
real result = agms[idk*a_ld + idm + a_offset];
|
||||||
|
if (a_conjugate) { COMPLEX_CONJUGATE(result); }
|
||||||
|
alm[kg*(WGD + PADA) + mg] = result;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Same as above, but now for the B input matrix
|
||||||
|
inline void GlobalToLocalScalarB(const __global real* restrict bgms, __local real* blm,
|
||||||
|
const int b_ld, const int b_offset, const int kwg,
|
||||||
|
const int b_transpose, const int b_conjugate) {
|
||||||
|
#if MDIMCD == NDIMBD
|
||||||
|
const int lb0 = get_local_id(0);
|
||||||
|
const int lb1 = get_local_id(1);
|
||||||
|
#else
|
||||||
|
const int tid = get_local_id(0) + MDIMCD*get_local_id(1);
|
||||||
|
const int lb0 = tid % NDIMBD;
|
||||||
|
const int lb1 = tid / NDIMBD;
|
||||||
|
#endif
|
||||||
|
#pragma unroll
|
||||||
|
for (int kib=0; kib<KWBD; ++kib) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int nib=0; nib<NWBD; ++nib) {
|
||||||
|
|
||||||
|
// Computes the indices for the global memory
|
||||||
|
int ng = nib + lb0*NWBD;
|
||||||
|
int kg = kib + lb1*KWBD;
|
||||||
|
int idn = (b_transpose) ? ng + kwg : ng + GetGroupID1()*WGD;
|
||||||
|
int idk = (b_transpose) ? kg + GetGroupID1()*WGD : kg + kwg;
|
||||||
|
|
||||||
|
// Loads the data from global memory into the local memory
|
||||||
|
real result = bgms[idk*b_ld + idn + b_offset];
|
||||||
|
if (b_conjugate) { COMPLEX_CONJUGATE(result); }
|
||||||
|
blm[kg*(WGD + PADB) + ng] = result;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
|
||||||
|
// caching the A input matrix. In contrast to the functions above, this function performs bounds
|
||||||
|
// checks and doesn't use the vector data-types.
|
||||||
|
inline void GlobalToLocalCheckedA(const __global real* restrict agms, __local real* alm,
|
||||||
|
const int a_ld, const int a_offset, const int kwg,
|
||||||
|
const int a_transpose, const int a_conjugate,
|
||||||
|
const int kSizeM, const int kSizeK) {
|
||||||
|
#if MDIMCD == MDIMAD
|
||||||
|
const int la0 = get_local_id(0);
|
||||||
|
const int la1 = get_local_id(1);
|
||||||
|
#else
|
||||||
|
const int tid = get_local_id(0) + MDIMCD*get_local_id(1);
|
||||||
|
const int la0 = tid % MDIMAD;
|
||||||
|
const int la1 = tid / MDIMAD;
|
||||||
|
#endif
|
||||||
|
#pragma unroll
|
||||||
|
for (int mia=0; mia<MWAD; ++mia) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int kia=0; kia<KWAD; ++kia) {
|
||||||
|
|
||||||
|
// Computes the indices for the global memory
|
||||||
|
int mg = mia + la0*MWAD;
|
||||||
|
int kg = kia + la1*KWAD;
|
||||||
|
int idm = (a_transpose) ? mg + kwg : mg + GetGroupID0()*WGD;
|
||||||
|
int idk = (a_transpose) ? kg + GetGroupID0()*WGD : kg + kwg;
|
||||||
|
|
||||||
|
// Loads the data from global memory into the local memory
|
||||||
|
int condition = (a_transpose) ? idm < kSizeK : idm < kSizeM;
|
||||||
|
if (condition) {
|
||||||
|
real result = agms[idk*a_ld + idm + a_offset];
|
||||||
|
if (a_conjugate) { COMPLEX_CONJUGATE(result); }
|
||||||
|
alm[kg*(WGD + PADA) + mg] = result;
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
SetToZero(alm[kg*(WGD + PADA) + mg]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Same as above, but now for the B input matrix
|
||||||
|
inline void GlobalToLocalCheckedB(const __global real* restrict bgms, __local real* blm,
|
||||||
|
const int b_ld, const int b_offset, const int kwg,
|
||||||
|
const int b_transpose, const int b_conjugate,
|
||||||
|
const int kSizeN, const int kSizeK) {
|
||||||
|
#if MDIMCD == NDIMBD
|
||||||
|
const int lb0 = get_local_id(0);
|
||||||
|
const int lb1 = get_local_id(1);
|
||||||
|
#else
|
||||||
|
const int tid = get_local_id(0) + MDIMCD*get_local_id(1);
|
||||||
|
const int lb0 = tid % NDIMBD;
|
||||||
|
const int lb1 = tid / NDIMBD;
|
||||||
|
#endif
|
||||||
|
#pragma unroll
|
||||||
|
for (int kib=0; kib<KWBD; ++kib) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int nib=0; nib<NWBD; ++nib) {
|
||||||
|
|
||||||
|
// Computes the indices for the global memory
|
||||||
|
int ng = nib + lb0*NWBD;
|
||||||
|
int kg = kib + lb1*KWBD;
|
||||||
|
int idn = (b_transpose) ? ng + kwg : ng + GetGroupID1()*WGD;
|
||||||
|
int idk = (b_transpose) ? kg + GetGroupID1()*WGD : kg + kwg;
|
||||||
|
|
||||||
|
// Loads the data from global memory into the local memory
|
||||||
|
int condition = (b_transpose) ? idn < kSizeK : idn < kSizeN;
|
||||||
|
if (condition) {
|
||||||
|
real result = bgms[idk*b_ld + idn + b_offset];
|
||||||
|
if (b_conjugate) { COMPLEX_CONJUGATE(result); }
|
||||||
|
blm[kg*(WGD + PADB) + ng] = result;
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
SetToZero(blm[kg*(WGD + PADB) + ng]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// End of the C++11 raw string literal
|
||||||
|
)"
|
||||||
|
|
||||||
|
// =================================================================================================
|
214
src/kernels/level3/xgemm_direct_part3.opencl
Normal file
214
src/kernels/level3/xgemm_direct_part3.opencl
Normal file
|
@ -0,0 +1,214 @@
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
|
||||||
|
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
|
||||||
|
// width of 100 characters per line.
|
||||||
|
//
|
||||||
|
// Author(s):
|
||||||
|
// Cedric Nugteren <www.cedricnugteren.nl>
|
||||||
|
//
|
||||||
|
// This is part 3 of 3 of the GEMM kernel. See part 1 for more information.
|
||||||
|
//
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string
|
||||||
|
// literal). Comment-out this line for syntax-highlighting when developing.
|
||||||
|
R"(
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// Main body of the kernel. This is the direct version without pre/post processing and restrictions.
// Computes C = alpha*op(A)*op(B) + beta*C for arbitrary sizes, transpose and conjugation options.
// Workgroups that fall entirely inside the M x N output take a fast unchecked path; border
// workgroups take a slower bounds-checked path. 'alm'/'blm' are the caller-provided local-memory
// tiles of size WGD*(WGD+PADA) and WGD*(WGD+PADB) respectively.
inline void XgemmDirect(const int kSizeM, const int kSizeN, const int kSizeK,
                        const real_arg arg_alpha,
                        const real_arg arg_beta,
                        const __global realMD* restrict agm, const int a_offset, const int a_ld,
                        const __global realND* restrict bgm, const int b_offset, const int b_ld,
                        __global real* cgm, const int c_offset, const int c_ld,
                        __local real* alm, __local real* blm,
                        const int a_transpose, const int b_transpose, const int c_transpose,
                        const int a_conjugate, const int b_conjugate) {
  const real alpha = GetRealArg(arg_alpha);
  const real beta = GetRealArg(arg_beta);

  // Extra pointers to scalar versions of global memory (the vector-typed pointers are only
  // usable when the leading dimensions are multiples of the vector widths)
  const __global real* restrict agms = (const __global real* restrict) agm;
  const __global real* restrict bgms = (const __global real* restrict) bgm;

  // Allocates workitem-private memory (registers)
  real apm[MWID];
  real bpm[NWID];
  real cpm[NWID][MWID];

  // Initializes the accumulation registers
  InitAccRegistersDirect(cpm);

  // The faster version of GEMM is not allowed on the (incomplete) borders. Therefore, this section
  // processes only the main parts: output blocks of WGD by WGD.
  const int idm = get_local_id(0) * MWID + GetGroupID0() * WGD;
  const int idn = get_local_id(1) * NWID + GetGroupID1() * WGD;
  if ((idm < (kSizeM/WGD)*WGD) && (idn < (kSizeN/WGD)*WGD)) {

    // Loops over all complete workgroup tiles (K-dimension)
    int kwg = 0;
    for (; kwg < (kSizeK/WGD) * WGD; kwg+=WGD) {

      // Loads data: off-chip --> local (matrix A and B). The vectorised loaders require the
      // leading dimension to be a multiple of the vector width; fall back to scalar otherwise.
      if (a_ld % VWMD == 0) {
        GlobalToLocalDirectA(agm, alm, a_ld, a_offset, kwg, a_transpose, a_conjugate);
      }
      else {
        GlobalToLocalScalarA(agms, alm, a_ld, a_offset, kwg, a_transpose, a_conjugate);
      }
      if (b_ld % VWND == 0) {
        GlobalToLocalDirectB(bgm, blm, b_ld, b_offset, kwg, b_transpose, b_conjugate);
      }
      else {
        GlobalToLocalScalarB(bgms, blm, b_ld, b_offset, kwg, b_transpose, b_conjugate);
      }
      barrier(CLK_LOCAL_MEM_FENCE);  // local tiles must be complete before compute starts

      // Loops over all workitem tiles, unrolled by a factor KWID
      for (int pwi=0; pwi<WGD; pwi+=KWID) {
        #pragma unroll
        for (int pit=0; pit<KWID; ++pit) {
          int kg = pwi + pit;

          // Loads data: local --> private (matrix A and B)
          LocalToPrivateDirectA(alm, apm, kg, a_transpose);
          LocalToPrivateDirectB(blm, bpm, kg, b_transpose);

          // Performs the accumulation (Cpm += Apm * Bpm)
          MultiplyAccumulateDirect(cpm, apm, bpm);
        }
      }
      barrier(CLK_LOCAL_MEM_FENCE);  // all reads done before the next iteration overwrites the tiles
    }

    // Loop over the remaining part (incomplete tile in K-dimension): element-wise, directly
    // from global memory into registers, skipping the local-memory staging
    for (; kwg < kSizeK; ++kwg) {

      // Loads data: off-chip --> private (matrix A and B)
      GlobalToPrivateDirectA(agms, apm, a_ld, a_offset, idm, kwg, a_transpose, a_conjugate);
      GlobalToPrivateDirectB(bgms, bpm, b_ld, b_offset, idn, kwg, b_transpose, b_conjugate);

      // Performs the accumulation (Cpm += Apm * Bpm)
      MultiplyAccumulateDirect(cpm, apm, bpm);
    }

    // Stores a tile of results and performs the multiplication with alpha and beta
    StoreResultsDirect(cgm, cpm, idm, idn, alpha, beta, c_ld, c_offset, c_transpose);
  }

  // Simple but slower version for the parts on the edge (incomplete tiles in M and N-dimensions):
  // same structure as the fast path, but all loads and stores are bounds-checked
  else {

    // Loops over all complete workgroup tiles (K-dimension)
    int kwg = 0;
    for (; kwg < (kSizeK/WGD) * WGD; kwg+=WGD) {

      // Loads data: off-chip --> local (matrix A and B)
      GlobalToLocalCheckedA(agms, alm, a_ld, a_offset, kwg, a_transpose, a_conjugate, kSizeM, kSizeK);
      GlobalToLocalCheckedB(bgms, blm, b_ld, b_offset, kwg, b_transpose, b_conjugate, kSizeN, kSizeK);
      barrier(CLK_LOCAL_MEM_FENCE);

      // Loops over all workitem tiles, unrolled by a factor KWID
      for (int pwi=0; pwi<WGD; pwi+=KWID) {
        #pragma unroll
        for (int pit=0; pit<KWID; ++pit) {
          int kg = pwi + pit;

          // Loads data: local --> private (matrix A and B)
          LocalToPrivateDirectA(alm, apm, kg, a_transpose);
          LocalToPrivateDirectB(blm, bpm, kg, b_transpose);

          // Performs the accumulation (Cpm += Apm * Bpm)
          MultiplyAccumulateDirect(cpm, apm, bpm);
        }
      }
      barrier(CLK_LOCAL_MEM_FENCE);
    }

    // Loop over the remaining part (incomplete tile in K-dimension)
    for (; kwg < kSizeK; ++kwg) {

      // Loads data: off-chip --> private (matrix A and B)
      GlobalToPrivateCheckedA(agms, apm, a_ld, a_offset, idm, kwg, a_transpose, a_conjugate, kSizeM);
      GlobalToPrivateCheckedB(bgms, bpm, b_ld, b_offset, idn, kwg, b_transpose, b_conjugate, kSizeN);

      // Performs the accumulation (Cpm += Apm * Bpm)
      MultiplyAccumulateDirect(cpm, apm, bpm);
    }

    // Stores a tile of results and performs the multiplication with alpha and beta
    StoreResultsChecked(cgm, cpm, idm, idn, kSizeM, kSizeN, alpha, beta, c_ld, c_offset, c_transpose);
  }
}
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// Direct version of the GEMM kernel with [A, B] = [non-transposed, non-transposed]
|
||||||
|
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
|
||||||
|
__kernel void XgemmDirectNN(const int kSizeM, const int kSizeN, const int kSizeK,
|
||||||
|
const real_arg arg_alpha, const real_arg arg_beta,
|
||||||
|
const __global realMD* restrict agm, const int a_offset, const int a_ld,
|
||||||
|
const __global realND* restrict bgm, const int b_offset, const int b_ld,
|
||||||
|
__global real* cgm, const int c_offset, const int c_ld,
|
||||||
|
const int c_transpose, const int a_conjugate, const int b_conjugate) {
|
||||||
|
__local real alm[WGD * (WGD + PADA)];
|
||||||
|
__local real blm[WGD * (WGD + PADB)];
|
||||||
|
XgemmDirect(kSizeM, kSizeN, kSizeK, arg_alpha, arg_beta,
|
||||||
|
agm, a_offset, a_ld, bgm, b_offset, b_ld, cgm, c_offset, c_ld,
|
||||||
|
alm, blm, 0, 0, c_transpose, a_conjugate, b_conjugate);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Direct version of the GEMM kernel with [A, B] = [non-transposed, transposed]
|
||||||
|
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
|
||||||
|
__kernel void XgemmDirectNT(const int kSizeM, const int kSizeN, const int kSizeK,
|
||||||
|
const real_arg arg_alpha, const real_arg arg_beta,
|
||||||
|
const __global realMD* restrict agm, const int a_offset, const int a_ld,
|
||||||
|
const __global realND* restrict bgm, const int b_offset, const int b_ld,
|
||||||
|
__global real* cgm, const int c_offset, const int c_ld,
|
||||||
|
const int c_transpose, const int a_conjugate, const int b_conjugate) {
|
||||||
|
__local real alm[WGD * (WGD + PADA)];
|
||||||
|
__local real blm[WGD * (WGD + PADB)];
|
||||||
|
XgemmDirect(kSizeM, kSizeN, kSizeK, arg_alpha, arg_beta,
|
||||||
|
agm, a_offset, a_ld, bgm, b_offset, b_ld, cgm, c_offset, c_ld,
|
||||||
|
alm, blm, 0, 1, c_transpose, a_conjugate, b_conjugate);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Direct version of the GEMM kernel with [A, B] = [transposed, non-transposed]
|
||||||
|
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
|
||||||
|
__kernel void XgemmDirectTN(const int kSizeM, const int kSizeN, const int kSizeK,
|
||||||
|
const real_arg arg_alpha, const real_arg arg_beta,
|
||||||
|
const __global realMD* restrict agm, const int a_offset, const int a_ld,
|
||||||
|
const __global realND* restrict bgm, const int b_offset, const int b_ld,
|
||||||
|
__global real* cgm, const int c_offset, const int c_ld,
|
||||||
|
const int c_transpose, const int a_conjugate, const int b_conjugate) {
|
||||||
|
__local real alm[WGD * (WGD + PADA)];
|
||||||
|
__local real blm[WGD * (WGD + PADB)];
|
||||||
|
XgemmDirect(kSizeM, kSizeN, kSizeK, arg_alpha, arg_beta,
|
||||||
|
agm, a_offset, a_ld, bgm, b_offset, b_ld, cgm, c_offset, c_ld,
|
||||||
|
alm, blm, 1, 0, c_transpose, a_conjugate, b_conjugate);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Direct version of the GEMM kernel with [A, B] = [transposed, transposed]
|
||||||
|
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
|
||||||
|
__kernel void XgemmDirectTT(const int kSizeM, const int kSizeN, const int kSizeK,
|
||||||
|
const real_arg arg_alpha, const real_arg arg_beta,
|
||||||
|
const __global realMD* restrict agm, const int a_offset, const int a_ld,
|
||||||
|
const __global realND* restrict bgm, const int b_offset, const int b_ld,
|
||||||
|
__global real* cgm, const int c_offset, const int c_ld,
|
||||||
|
const int c_transpose, const int a_conjugate, const int b_conjugate) {
|
||||||
|
__local real alm[WGD * (WGD + PADA)];
|
||||||
|
__local real blm[WGD * (WGD + PADB)];
|
||||||
|
XgemmDirect(kSizeM, kSizeN, kSizeK, arg_alpha, arg_beta,
|
||||||
|
agm, a_offset, a_ld, bgm, b_offset, b_ld, cgm, c_offset, c_ld,
|
||||||
|
alm, blm, 1, 1, c_transpose, a_conjugate, b_conjugate);
|
||||||
|
}
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// End of the C++11 raw string literal
|
||||||
|
)"
|
||||||
|
|
||||||
|
// =================================================================================================
|
|
@ -22,7 +22,9 @@ namespace clblast {
|
||||||
// Constructor: forwards to base class constructor
|
// Constructor: forwards to base class constructor
|
||||||
template <typename T>
|
template <typename T>
|
||||||
Xgemm<T>::Xgemm(Queue &queue, EventPointer event, const std::string &name):
|
Xgemm<T>::Xgemm(Queue &queue, EventPointer event, const std::string &name):
|
||||||
Routine(queue, event, name, {"Copy","Pad","Transpose","Padtranspose","Xgemm"}, PrecisionValue<T>()) {
|
Routine(queue, event, name,
|
||||||
|
{"Copy","Pad","Transpose","Padtranspose","Xgemm","XgemmDirect","KernelSelection"},
|
||||||
|
PrecisionValue<T>()) {
|
||||||
source_string_ =
|
source_string_ =
|
||||||
#include "../../kernels/level3/level3.opencl"
|
#include "../../kernels/level3/level3.opencl"
|
||||||
#include "../../kernels/level3/copy_fast.opencl"
|
#include "../../kernels/level3/copy_fast.opencl"
|
||||||
|
@ -35,6 +37,9 @@ Xgemm<T>::Xgemm(Queue &queue, EventPointer event, const std::string &name):
|
||||||
#include "../../kernels/level3/xgemm_part1.opencl"
|
#include "../../kernels/level3/xgemm_part1.opencl"
|
||||||
#include "../../kernels/level3/xgemm_part2.opencl"
|
#include "../../kernels/level3/xgemm_part2.opencl"
|
||||||
#include "../../kernels/level3/xgemm_part3.opencl"
|
#include "../../kernels/level3/xgemm_part3.opencl"
|
||||||
|
#include "../../kernels/level3/xgemm_direct_part1.opencl"
|
||||||
|
#include "../../kernels/level3/xgemm_direct_part2.opencl"
|
||||||
|
#include "../../kernels/level3/xgemm_direct_part3.opencl"
|
||||||
;
|
;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -98,6 +103,44 @@ StatusCode Xgemm<T>::DoGemm(const Layout layout,
|
||||||
status = TestMatrixC(c_one, c_two, c_buffer, c_offset, c_ld);
|
status = TestMatrixC(c_one, c_two, c_buffer, c_offset, c_ld);
|
||||||
if (ErrorIn(status)) { return status; }
|
if (ErrorIn(status)) { return status; }
|
||||||
|
|
||||||
|
// Selects which version of GEMM to run
|
||||||
|
const auto do_gemm_direct = (m * n * k < db_["XGEMM_MIN_INDIRECT_SIZE"]);
|
||||||
|
if (do_gemm_direct) { // for small sizes (single kernel)
|
||||||
|
return GemmDirect(m, n, k, alpha,
|
||||||
|
a_buffer, a_offset, a_ld, b_buffer, b_offset, b_ld, beta,
|
||||||
|
c_buffer, c_offset, c_ld,
|
||||||
|
a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate);
|
||||||
|
}
|
||||||
|
else { // for larger sizes (pre/post-processing plus a very fast kernel)
|
||||||
|
return GemmIndirect(m, n, k, alpha,
|
||||||
|
a_buffer, a_offset, a_ld, b_buffer, b_offset, b_ld, beta,
|
||||||
|
c_buffer, c_offset, c_ld,
|
||||||
|
a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate,
|
||||||
|
a_one, a_two, a_want_rotated,
|
||||||
|
b_one, b_two, b_want_rotated,
|
||||||
|
c_one, c_two, c_want_rotated);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// The indirect version of GEMM. This uses the faster but non-general kernel. It has specific
|
||||||
|
// requirements, but several pre and post-processing kernels take care of those. However, the
|
||||||
|
// overhead of these extra kernels might not be ideal for certain devices/arguments.
|
||||||
|
template <typename T>
|
||||||
|
StatusCode Xgemm<T>::GemmIndirect(const size_t m, const size_t n, const size_t k,
|
||||||
|
const T alpha,
|
||||||
|
const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld,
|
||||||
|
const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld,
|
||||||
|
const T beta,
|
||||||
|
const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld,
|
||||||
|
const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
|
||||||
|
const bool a_conjugate, const bool b_conjugate,
|
||||||
|
const size_t a_one, const size_t a_two, const bool a_want_rotated,
|
||||||
|
const size_t b_one, const size_t b_two, const bool b_want_rotated,
|
||||||
|
const size_t c_one, const size_t c_two, const bool c_want_rotated) {
|
||||||
|
auto status = StatusCode::kSuccess;
|
||||||
|
|
||||||
// Calculates the ceiled versions of m, n, and k
|
// Calculates the ceiled versions of m, n, and k
|
||||||
const auto m_ceiled = Ceil(m, db_["MWG"]);
|
const auto m_ceiled = Ceil(m, db_["MWG"]);
|
||||||
const auto n_ceiled = Ceil(n, db_["NWG"]);
|
const auto n_ceiled = Ceil(n, db_["NWG"]);
|
||||||
|
@ -217,6 +260,66 @@ StatusCode Xgemm<T>::DoGemm(const Layout layout,
|
||||||
} catch (...) { return StatusCode::kTempBufferAllocFailure; }
|
} catch (...) { return StatusCode::kTempBufferAllocFailure; }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// The direct version of GEMM, requiring just one kernel, no pre or post-processing kernels.
|
||||||
|
template <typename T>
|
||||||
|
StatusCode Xgemm<T>::GemmDirect(const size_t m, const size_t n, const size_t k,
|
||||||
|
const T alpha,
|
||||||
|
const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld,
|
||||||
|
const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld,
|
||||||
|
const T beta,
|
||||||
|
const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld,
|
||||||
|
const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
|
||||||
|
const bool a_conjugate, const bool b_conjugate) {
|
||||||
|
|
||||||
|
// Loads the program from the database
|
||||||
|
const auto program = GetProgramFromCache(context_, PrecisionValue<T>(), routine_name_);
|
||||||
|
|
||||||
|
// Retrieves the proper XgemmDirect kernel from the compiled binary
|
||||||
|
try {
|
||||||
|
const auto name = (a_do_transpose) ? (b_do_transpose ? "XgemmDirectTT" : "XgemmDirectTN") :
|
||||||
|
(b_do_transpose ? "XgemmDirectNT" : "XgemmDirectNN");
|
||||||
|
auto kernel = Kernel(program, name);
|
||||||
|
|
||||||
|
// Sets the kernel arguments
|
||||||
|
kernel.SetArgument(0, static_cast<int>(m));
|
||||||
|
kernel.SetArgument(1, static_cast<int>(n));
|
||||||
|
kernel.SetArgument(2, static_cast<int>(k));
|
||||||
|
kernel.SetArgument(3, GetRealArg(alpha));
|
||||||
|
kernel.SetArgument(4, GetRealArg(beta));
|
||||||
|
kernel.SetArgument(5, a_buffer());
|
||||||
|
kernel.SetArgument(6, static_cast<int>(a_offset));
|
||||||
|
kernel.SetArgument(7, static_cast<int>(a_ld));
|
||||||
|
kernel.SetArgument(8, b_buffer());
|
||||||
|
kernel.SetArgument(9, static_cast<int>(b_offset));
|
||||||
|
kernel.SetArgument(10, static_cast<int>(b_ld));
|
||||||
|
kernel.SetArgument(11, c_buffer());
|
||||||
|
kernel.SetArgument(12, static_cast<int>(c_offset));
|
||||||
|
kernel.SetArgument(13, static_cast<int>(c_ld));
|
||||||
|
kernel.SetArgument(14, static_cast<int>(c_do_transpose));
|
||||||
|
kernel.SetArgument(15, static_cast<int>(a_conjugate));
|
||||||
|
kernel.SetArgument(16, static_cast<int>(b_conjugate));
|
||||||
|
|
||||||
|
// Computes the global and local thread sizes
|
||||||
|
const auto m_ceiled = Ceil(m, db_["WGD"]);
|
||||||
|
const auto n_ceiled = Ceil(n, db_["WGD"]);
|
||||||
|
const auto global = std::vector<size_t>{
|
||||||
|
(m_ceiled * db_["MDIMCD"]) / db_["WGD"],
|
||||||
|
(n_ceiled * db_["NDIMCD"]) / db_["WGD"]
|
||||||
|
};
|
||||||
|
const auto local = std::vector<size_t>{db_["MDIMCD"], db_["NDIMCD"]};
|
||||||
|
|
||||||
|
// Launches the kernel
|
||||||
|
auto status = RunKernel(kernel, queue_, device_, global, local, event_);
|
||||||
|
if (ErrorIn(status)) { return status; }
|
||||||
|
|
||||||
|
// Successfully finished the computation
|
||||||
|
return StatusCode::kSuccess;
|
||||||
|
} catch (...) { return StatusCode::kInvalidKernel; }
|
||||||
|
}
|
||||||
|
|
||||||
// =================================================================================================
|
// =================================================================================================
|
||||||
|
|
||||||
// Compiles the templated class
|
// Compiles the templated class
|
||||||
|
|
|
@ -35,6 +35,29 @@ class Xgemm: public Routine {
|
||||||
const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld,
|
const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld,
|
||||||
const T beta,
|
const T beta,
|
||||||
const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld);
|
const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld);
|
||||||
|
|
||||||
|
// Indirect version of GEMM (with pre and post-processing kernels)
|
||||||
|
StatusCode GemmIndirect(const size_t m, const size_t n, const size_t k,
|
||||||
|
const T alpha,
|
||||||
|
const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld,
|
||||||
|
const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld,
|
||||||
|
const T beta,
|
||||||
|
const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld,
|
||||||
|
const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
|
||||||
|
const bool a_conjugate, const bool b_conjugate,
|
||||||
|
const size_t a_one, const size_t a_two, const bool a_want_rotated,
|
||||||
|
const size_t b_one, const size_t b_two, const bool b_want_rotated,
|
||||||
|
const size_t c_one, const size_t c_two, const bool c_want_rotated);
|
||||||
|
|
||||||
|
// Direct version of GEMM (no pre and post-processing kernels)
|
||||||
|
StatusCode GemmDirect(const size_t m, const size_t n, const size_t k,
|
||||||
|
const T alpha,
|
||||||
|
const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld,
|
||||||
|
const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld,
|
||||||
|
const T beta,
|
||||||
|
const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld,
|
||||||
|
const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
|
||||||
|
const bool a_conjugate, const bool b_conjugate);
|
||||||
};
|
};
|
||||||
|
|
||||||
// =================================================================================================
|
// =================================================================================================
|
||||||
|
|
|
@ -47,6 +47,7 @@ class TuneCopy {
|
||||||
static size_t DefaultN() { return 1024; }
|
static size_t DefaultN() { return 1024; }
|
||||||
static size_t DefaultK() { return 1; } // N/A for this kernel
|
static size_t DefaultK() { return 1; } // N/A for this kernel
|
||||||
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
||||||
|
static size_t DefaultNumRuns() { return 2; } // run every kernel this many times for averaging
|
||||||
|
|
||||||
// Describes how to obtain the sizes of the buffers
|
// Describes how to obtain the sizes of the buffers
|
||||||
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
|
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
|
||||||
|
|
|
@ -47,6 +47,7 @@ class TunePad {
|
||||||
static size_t DefaultN() { return 1024; }
|
static size_t DefaultN() { return 1024; }
|
||||||
static size_t DefaultK() { return 1; } // N/A for this kernel
|
static size_t DefaultK() { return 1; } // N/A for this kernel
|
||||||
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
||||||
|
static size_t DefaultNumRuns() { return 2; } // run every kernel this many times for averaging
|
||||||
|
|
||||||
// Describes how to obtain the sizes of the buffers
|
// Describes how to obtain the sizes of the buffers
|
||||||
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
|
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
|
||||||
|
|
|
@ -47,6 +47,7 @@ class TuneTranspose {
|
||||||
static size_t DefaultN() { return 1024; }
|
static size_t DefaultN() { return 1024; }
|
||||||
static size_t DefaultK() { return 1; } // N/A for this kernel
|
static size_t DefaultK() { return 1; } // N/A for this kernel
|
||||||
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
||||||
|
static size_t DefaultNumRuns() { return 2; } // run every kernel this many times for averaging
|
||||||
|
|
||||||
// Describes how to obtain the sizes of the buffers
|
// Describes how to obtain the sizes of the buffers
|
||||||
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
|
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
|
||||||
|
|
|
@ -47,6 +47,7 @@ class TunePadTranspose {
|
||||||
static size_t DefaultN() { return 1024; }
|
static size_t DefaultN() { return 1024; }
|
||||||
static size_t DefaultK() { return 1; } // N/A for this kernel
|
static size_t DefaultK() { return 1; } // N/A for this kernel
|
||||||
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
||||||
|
static size_t DefaultNumRuns() { return 2; } // run every kernel this many times for averaging
|
||||||
|
|
||||||
// Describes how to obtain the sizes of the buffers
|
// Describes how to obtain the sizes of the buffers
|
||||||
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
|
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
|
||||||
|
|
|
@ -51,6 +51,7 @@ class TuneXaxpy {
|
||||||
static size_t DefaultN() { return 4096*1024; }
|
static size_t DefaultN() { return 4096*1024; }
|
||||||
static size_t DefaultK() { return 1; } // N/A for this kernel
|
static size_t DefaultK() { return 1; } // N/A for this kernel
|
||||||
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
||||||
|
static size_t DefaultNumRuns() { return 2; } // run every kernel this many times for averaging
|
||||||
|
|
||||||
// Describes how to obtain the sizes of the buffers
|
// Describes how to obtain the sizes of the buffers
|
||||||
static size_t GetSizeX(const Arguments<T> &args) { return args.n; }
|
static size_t GetSizeX(const Arguments<T> &args) { return args.n; }
|
||||||
|
|
|
@ -47,6 +47,7 @@ class TuneXdot {
|
||||||
static size_t DefaultN() { return 2*1024*1024; }
|
static size_t DefaultN() { return 2*1024*1024; }
|
||||||
static size_t DefaultK() { return 1; } // N/A for this kernel
|
static size_t DefaultK() { return 1; } // N/A for this kernel
|
||||||
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
||||||
|
static size_t DefaultNumRuns() { return 2; } // run every kernel this many times for averaging
|
||||||
|
|
||||||
// Describes how to obtain the sizes of the buffers
|
// Describes how to obtain the sizes of the buffers
|
||||||
static size_t GetSizeX(const Arguments<T> &args) { return args.n; }
|
static size_t GetSizeX(const Arguments<T> &args) { return args.n; }
|
||||||
|
|
|
@ -52,6 +52,7 @@ class TuneXgemm {
|
||||||
static size_t DefaultN() { return 1024; }
|
static size_t DefaultN() { return 1024; }
|
||||||
static size_t DefaultK() { return 1024; }
|
static size_t DefaultK() { return 1024; }
|
||||||
static double DefaultFraction() { return (V==1) ? 1.0 : 512.0; } // test all or sample randomly
|
static double DefaultFraction() { return (V==1) ? 1.0 : 512.0; } // test all or sample randomly
|
||||||
|
static size_t DefaultNumRuns() { return 2; } // run every kernel this many times for averaging
|
||||||
|
|
||||||
// Describes how to obtain the sizes of the buffers
|
// Describes how to obtain the sizes of the buffers
|
||||||
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
|
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
|
||||||
|
|
196
src/tuning/kernels/xgemm_direct.cpp
Normal file
196
src/tuning/kernels/xgemm_direct.cpp
Normal file
|
@ -0,0 +1,196 @@
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
|
||||||
|
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
|
||||||
|
// width of 100 characters per line.
|
||||||
|
//
|
||||||
|
// Author(s):
|
||||||
|
// Cedric Nugteren <www.cedricnugteren.nl>
|
||||||
|
//
|
||||||
|
// This file uses the CLTune auto-tuner to tune the direct xgemm kernels. There are two variations:
|
||||||
|
// - V==1: This tests some limited set of tuning parameters exhaustively.
|
||||||
|
// - V==2: This tests a much larger set of tuning parameters by randomly sampling a subset.
|
||||||
|
//
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "utilities.hpp"
|
||||||
|
#include "tuning/tuning.hpp"
|
||||||
|
|
||||||
|
namespace clblast {
|
||||||
|
// =================================================================================================
|
||||||
|
|
||||||
|
// See comment at top of file for a description of the class
|
||||||
|
template <typename T, int V>
|
||||||
|
class TuneXgemmDirect {
|
||||||
|
public:
|
||||||
|
|
||||||
|
// The representative kernel and the source code
|
||||||
|
static std::string KernelFamily() { return (V==1) ? "xgemm_direct_1" : "xgemm_direct_2"; }
|
||||||
|
static std::string KernelName() { return "XgemmDirectTN"; }
|
||||||
|
static std::string GetSources() {
|
||||||
|
return
|
||||||
|
#include "../src/kernels/common.opencl"
|
||||||
|
#include "../src/kernels/level3/xgemm_direct_part1.opencl"
|
||||||
|
#include "../src/kernels/level3/xgemm_direct_part2.opencl"
|
||||||
|
#include "../src/kernels/level3/xgemm_direct_part3.opencl"
|
||||||
|
;
|
||||||
|
}
|
||||||
|
|
||||||
|
// The list of arguments relevant for this routine
|
||||||
|
static std::vector<std::string> GetOptions() {
|
||||||
|
return {kArgM, kArgN, kArgK, kArgAlpha, kArgBeta, kArgFraction};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests for valid arguments
|
||||||
|
static void TestValidArguments(const Arguments<T> &) { }
|
||||||
|
|
||||||
|
// Sets the default values for the arguments
|
||||||
|
static size_t DefaultM() { return 256; }
|
||||||
|
static size_t DefaultN() { return 256; }
|
||||||
|
static size_t DefaultK() { return 256; }
|
||||||
|
static double DefaultFraction() { return (V==1) ? 1.0 : 32.0; } // test all or sample randomly
|
||||||
|
static size_t DefaultNumRuns() { return 4; } // run every kernel this many times for averaging
|
||||||
|
|
||||||
|
// Describes how to obtain the sizes of the buffers
|
||||||
|
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
|
||||||
|
static size_t GetSizeY(const Arguments<T> &) { return 1; } // N/A for this kernel
|
||||||
|
static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.k; }
|
||||||
|
static size_t GetSizeB(const Arguments<T> &args) { return args.n * args.k; }
|
||||||
|
static size_t GetSizeC(const Arguments<T> &args) { return args.m * args.n; }
|
||||||
|
static size_t GetSizeTemp(const Arguments<T> &) { return 1; } // N/A for this kernel
|
||||||
|
|
||||||
|
// Sets the tuning parameters and their possible values
|
||||||
|
static void SetParameters(cltune::Tuner &tuner, const size_t id) {
|
||||||
|
if (V==1) { // limited subset of tuning parameters - but explorable exhaustively
|
||||||
|
tuner.AddParameter(id, "WGD", {8, 16, 32});
|
||||||
|
tuner.AddParameter(id, "MDIMCD", {8, 16, 32});
|
||||||
|
tuner.AddParameter(id, "NDIMCD", {8, 16, 32});
|
||||||
|
tuner.AddParameter(id, "MDIMAD", {8, 16, 32});
|
||||||
|
tuner.AddParameter(id, "NDIMBD", {8, 16, 32});
|
||||||
|
tuner.AddParameter(id, "KWID", {2});
|
||||||
|
tuner.AddParameter(id, "VWMD", {1, 2, 4, 8});
|
||||||
|
tuner.AddParameter(id, "VWND", {1, 2, 4, 8});
|
||||||
|
tuner.AddParameter(id, "PADA", {1});
|
||||||
|
tuner.AddParameter(id, "PADB", {1});
|
||||||
|
} // a lot more tuning parameters - has to be sampled randomly, too much to test all
|
||||||
|
else {
|
||||||
|
tuner.AddParameter(id, "WGD", {8, 16, 32, 64, 128});
|
||||||
|
tuner.AddParameter(id, "MDIMCD", {8, 16, 32});
|
||||||
|
tuner.AddParameter(id, "NDIMCD", {8, 16, 32});
|
||||||
|
tuner.AddParameter(id, "MDIMAD", {8, 16, 32});
|
||||||
|
tuner.AddParameter(id, "NDIMBD", {8, 16, 32});
|
||||||
|
tuner.AddParameter(id, "KWID", {2, 8, 16});
|
||||||
|
tuner.AddParameter(id, "VWMD", {1, 2, 4, 8});
|
||||||
|
tuner.AddParameter(id, "VWND", {1, 2, 4, 8});
|
||||||
|
tuner.AddParameter(id, "PADA", {0, 1});
|
||||||
|
tuner.AddParameter(id, "PADB", {0, 1});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sets the constraints
|
||||||
|
static void SetConstraints(cltune::Tuner &tuner, const size_t id) {
|
||||||
|
auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
|
||||||
|
auto MultipleOfXMulY = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]*v[2]); };
|
||||||
|
auto MultipleOfXMulYDivZ = [] (std::vector<size_t> v) { return IsMultiple(v[0], (v[1]*v[2])/v[3]); };
|
||||||
|
// Requirement for unrolling the WGD loop
|
||||||
|
tuner.AddConstraint(id, MultipleOfX, {"WGD", "KWID"});
|
||||||
|
// Required for integer MWID and NWID
|
||||||
|
tuner.AddConstraint(id, MultipleOfXMulY, {"WGD", "MDIMCD", "VWMD"});
|
||||||
|
tuner.AddConstraint(id, MultipleOfXMulY, {"WGD", "NDIMCD", "VWND"});
|
||||||
|
// Required for integer MWIAD and NWIBD
|
||||||
|
tuner.AddConstraint(id, MultipleOfXMulY, {"WGD", "MDIMAD", "VWMD"});
|
||||||
|
tuner.AddConstraint(id, MultipleOfXMulY, {"WGD", "NDIMBD", "VWND"});
|
||||||
|
// WGD has to be a multiple of KDIMAD = ((MDIMCD*NDIMCD)/(MDIMAD)) and KDIMBD = (...)
|
||||||
|
tuner.AddConstraint(id, MultipleOfXMulYDivZ, {"WGD", "MDIMCD", "NDIMCD", "MDIMAD"});
|
||||||
|
tuner.AddConstraint(id, MultipleOfXMulYDivZ, {"WGD", "MDIMCD", "NDIMCD", "NDIMBD"});
|
||||||
|
|
||||||
|
// Extra constraints for variation 1 to limit the set of options significantly
|
||||||
|
if (V==1) {
|
||||||
|
auto IsEqual = [] (std::vector<size_t> v) { return v[0] == v[1]; };
|
||||||
|
tuner.AddConstraint(id, IsEqual, {"MDIMCD", "MDIMAD"});
|
||||||
|
tuner.AddConstraint(id, IsEqual, {"NDIMCD", "NDIMBD"});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sets the local memory size
|
||||||
|
static void SetLocalMemorySize(cltune::Tuner &tuner, const size_t id, const Arguments<T> &args) {
|
||||||
|
auto LocalMemorySize = [args] (std::vector<size_t> v) {
|
||||||
|
return ((v[0]*(v[0] + v[1]) + v[0]*(v[0] + v[2]))*GetBytes(args.precision));
|
||||||
|
};
|
||||||
|
tuner.SetLocalMemoryUsage(id, LocalMemorySize, {"WGD", "PADA", "PADB"});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sets the base thread configuration
|
||||||
|
static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m, args.n}; }
|
||||||
|
static std::vector<size_t> GlobalSizeRef(const Arguments<T> &args) { return GlobalSize(args); }
|
||||||
|
static std::vector<size_t> LocalSize() { return {1, 1}; }
|
||||||
|
static std::vector<size_t> LocalSizeRef() { return {8, 8}; }
|
||||||
|
|
||||||
|
// Transforms the thread configuration based on the parameters
|
||||||
|
using TransformVector = std::vector<std::vector<std::string>>;
|
||||||
|
static TransformVector MulLocal() { return {{"MDIMCD", "NDIMCD"}}; }
|
||||||
|
static TransformVector DivLocal() { return {}; }
|
||||||
|
static TransformVector MulGlobal() { return {{"MDIMCD", "NDIMCD"}}; }
|
||||||
|
static TransformVector DivGlobal() { return {{"WGD", "WGD"}}; }
|
||||||
|
|
||||||
|
// Sets the kernel's arguments
|
||||||
|
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
|
||||||
|
std::vector<T> &, std::vector<T> &,
|
||||||
|
std::vector<T> &a_mat, std::vector<T> &b_mat, std::vector<T> &c_mat,
|
||||||
|
std::vector<T> &) {
|
||||||
|
tuner.AddArgumentScalar(static_cast<int>(args.m));
|
||||||
|
tuner.AddArgumentScalar(static_cast<int>(args.n));
|
||||||
|
tuner.AddArgumentScalar(static_cast<int>(args.k));
|
||||||
|
tuner.AddArgumentScalar(GetRealArg(args.alpha));
|
||||||
|
tuner.AddArgumentScalar(GetRealArg(args.beta));
|
||||||
|
tuner.AddArgumentInput(a_mat);
|
||||||
|
tuner.AddArgumentScalar(0); // a_offset
|
||||||
|
tuner.AddArgumentScalar(static_cast<int>(args.k)); // a_ld
|
||||||
|
tuner.AddArgumentInput(b_mat);
|
||||||
|
tuner.AddArgumentScalar(0); // b_offset
|
||||||
|
tuner.AddArgumentScalar(static_cast<int>(args.n)); // b_ld
|
||||||
|
tuner.AddArgumentOutput(c_mat);
|
||||||
|
tuner.AddArgumentScalar(0); // c_offset
|
||||||
|
tuner.AddArgumentScalar(static_cast<int>(args.n)); // c_ld
|
||||||
|
tuner.AddArgumentScalar(1); // c_do_transpose
|
||||||
|
tuner.AddArgumentScalar(0); // a_conjugate
|
||||||
|
tuner.AddArgumentScalar(0); // b_conjugate
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describes how to compute the performance metrics
|
||||||
|
static size_t GetMetric(const Arguments<T> &args) {
|
||||||
|
return 2 * args.m * args.n * args.k;
|
||||||
|
}
|
||||||
|
static std::string PerformanceUnit() { return "GFLOPS"; }
|
||||||
|
};
|
||||||
|
|
||||||
|
// =================================================================================================
|
||||||
|
} // namespace clblast
|
||||||
|
|
||||||
|
// Shortcuts to the clblast namespace
|
||||||
|
using float2 = clblast::float2;
|
||||||
|
using double2 = clblast::double2;
|
||||||
|
|
||||||
|
// Function to tune a specific variation V (not within the clblast namespace)
|
||||||
|
template <int V>
|
||||||
|
void StartVariation(int argc, char *argv[]) {
|
||||||
|
switch(clblast::GetPrecision(argc, argv)) {
|
||||||
|
case clblast::Precision::kHalf: clblast::Tuner<clblast::TuneXgemmDirect<half,V>, half>(argc, argv); break;
|
||||||
|
case clblast::Precision::kSingle: clblast::Tuner<clblast::TuneXgemmDirect<float,V>, float>(argc, argv); break;
|
||||||
|
case clblast::Precision::kDouble: clblast::Tuner<clblast::TuneXgemmDirect<double,V>, double>(argc, argv); break;
|
||||||
|
case clblast::Precision::kComplexSingle: clblast::Tuner<clblast::TuneXgemmDirect<float2,V>, float2>(argc, argv); break;
|
||||||
|
case clblast::Precision::kComplexDouble: clblast::Tuner<clblast::TuneXgemmDirect<double2,V>, double2>(argc, argv); break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Main function (not within the clblast namespace)
|
||||||
|
int main(int argc, char *argv[]) {
|
||||||
|
StartVariation<1>(argc, argv);
|
||||||
|
StartVariation<2>(argc, argv);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// =================================================================================================
|
|
@ -50,6 +50,7 @@ class TuneXgemv {
|
||||||
static size_t DefaultN() { return 2048; }
|
static size_t DefaultN() { return 2048; }
|
||||||
static size_t DefaultK() { return 1; } // N/A for this kernel
|
static size_t DefaultK() { return 1; } // N/A for this kernel
|
||||||
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
||||||
|
static size_t DefaultNumRuns() { return 2; } // run every kernel this many times for averaging
|
||||||
|
|
||||||
// Describes how to obtain the sizes of the buffers
|
// Describes how to obtain the sizes of the buffers
|
||||||
static size_t GetSizeX(const Arguments<T> &args) { return args.n; }
|
static size_t GetSizeX(const Arguments<T> &args) { return args.n; }
|
||||||
|
|
|
@ -47,6 +47,7 @@ class TuneXger {
|
||||||
static size_t DefaultN() { return 1024; }
|
static size_t DefaultN() { return 1024; }
|
||||||
static size_t DefaultK() { return 1; } // N/A for this kernel
|
static size_t DefaultK() { return 1; } // N/A for this kernel
|
||||||
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
static double DefaultFraction() { return 1.0; } // N/A for this kernel
|
||||||
|
static size_t DefaultNumRuns() { return 2; } // run every kernel this many times for averaging
|
||||||
|
|
||||||
// Describes how to obtain the sizes of the buffers
|
// Describes how to obtain the sizes of the buffers
|
||||||
static size_t GetSizeX(const Arguments<T> &args) { return args.m; }
|
static size_t GetSizeX(const Arguments<T> &args) { return args.m; }
|
||||||
|
|
|
@ -46,7 +46,7 @@ void Tuner(int argc, char* argv[]) {
|
||||||
if (o == kArgBeta) { args.beta = GetArgument(argc, argv, help, kArgBeta, GetScalar<T>()); }
|
if (o == kArgBeta) { args.beta = GetArgument(argc, argv, help, kArgBeta, GetScalar<T>()); }
|
||||||
if (o == kArgFraction) { args.fraction = GetArgument(argc, argv, help, kArgFraction, C::DefaultFraction()); }
|
if (o == kArgFraction) { args.fraction = GetArgument(argc, argv, help, kArgFraction, C::DefaultFraction()); }
|
||||||
}
|
}
|
||||||
const auto num_runs = GetArgument(argc, argv, help, kArgNumRuns, size_t{1});
|
const auto num_runs = GetArgument(argc, argv, help, kArgNumRuns, C::DefaultNumRuns());
|
||||||
|
|
||||||
fprintf(stdout, "%s\n", help.c_str());
|
fprintf(stdout, "%s\n", help.c_str());
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue