Merge pull request #23 from CNugteren/tuner_database

Added initial version of a tuner-database
Commit cf168fca70 by Cedric Nugteren, 2015-08-20 08:38:18 +02:00
15 changed files with 1004 additions and 723 deletions


@@ -109,6 +109,7 @@ set(LEVEL1_ROUTINES xaxpy)
set(LEVEL2_ROUTINES xgemv xhemv xsymv)
set(LEVEL3_ROUTINES xgemm xsymm xhemm xsyrk xherk xsyr2k xher2k xtrmm)
set(ROUTINES ${LEVEL1_ROUTINES} ${LEVEL2_ROUTINES} ${LEVEL3_ROUTINES})
set(PRECISIONS 32 3232 64 6464)
# ==================================================================================================
@@ -135,6 +136,17 @@ install(FILES include/clblast_c.h DESTINATION include)
# ==================================================================================================
# Sets a default platform and device to run tuners and/or tests on
set(DEVICEPLATFORM )
if(DEFINED ENV{DEFAULT_DEVICE})
set(DEVICEPLATFORM ${DEVICEPLATFORM} -device $ENV{DEFAULT_DEVICE})
endif()
if(DEFINED ENV{DEFAULT_PLATFORM})
set(DEVICEPLATFORM ${DEVICEPLATFORM} -platform $ENV{DEFAULT_PLATFORM})
endif()
# ==================================================================================================
# This section contains all the code related to the examples
if(SAMPLES)
@@ -163,16 +175,24 @@ if(TUNERS)
# Includes CLTune
include_directories(${CLTUNE_INCLUDE_DIRS})
# Creates the common tuner objects (requires CMake 2.8.8)
add_library(tuners_common OBJECT src/tuning/tuning.cc)
# Adds tuning executables
foreach(KERNEL ${KERNELS})
add_executable(tuner_${KERNEL} $<TARGET_OBJECTS:tuners_common> src/tuning/${KERNEL}.cc)
add_executable(tuner_${KERNEL} src/tuning/${KERNEL}.cc)
target_link_libraries(tuner_${KERNEL} clblast ${CLTUNE_LIBRARIES} ${OPENCL_LIBRARIES})
install(TARGETS tuner_${KERNEL} DESTINATION bin)
endforeach()
# Adds the 'alltuners' target: builds and runs all tuners for all precisions (e.g. 'make alltuners' when using a Makefile generator)
set(ALLTUNERS )
set(ALLTUNERSDEPENDS )
foreach(KERNEL ${KERNELS})
foreach(PRECISION ${PRECISIONS})
set(ALLTUNERS ${ALLTUNERS} COMMAND tuner_${KERNEL} -precision ${PRECISION} ${DEVICEPLATFORM})
endforeach()
set(ALLTUNERSDEPENDS tuner_${KERNEL})
endforeach()
add_custom_target(alltuners ${ALLTUNERS} DEPENDS ${ALLTUNERSDEPENDS})
endif()
# ==================================================================================================


@@ -7,9 +7,8 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements the header for the tuner functions. This is only used for the optional
// and stand-alone tuner binaries and not part of the core of CLBlast. The convention used here is
// that X and Y are vectors, while A, B, and C are matrices.
// This file implements the interface to the CLTune auto-tuner. This is only used for the optional
// and stand-alone tuner binaries and not part of the core of CLBlast.
//
// =================================================================================================
@@ -17,44 +16,121 @@
#define CLBLAST_TUNING_H_
#include <vector>
#include <functional>
#include <string>
#include <cltune.h>
namespace clblast {
// =================================================================================================
// Functions with two or three OpenCL memory buffers
template <typename T>
using Tuner2 = std::function<void(const Arguments<T>&,
const std::vector<T>&, std::vector<T>&,
cltune::Tuner&)>;
template <typename T>
using Tuner3 = std::function<void(const Arguments<T>&,
const std::vector<T>&, const std::vector<T>&, std::vector<T>&,
cltune::Tuner&)>;
// Function to get the command-line arguments, set up the input buffers, configure the tuner, and
// collect the results. Used for all types of kernel families. Note that this is a header-only
// function so that it is automatically compiled for the various kernels (given as the 'C' template
// argument).
template <typename C, typename T>
void Tuner(int argc, char* argv[]) {
// As above, but now with an additional ID for the variation
template <typename T>
using Tuner3V = std::function<void(const Arguments<T>&, const size_t,
const std::vector<T>&, const std::vector<T>&, std::vector<T>&,
cltune::Tuner&)>;
// Sets the parameters and platform/device for which to tune (command-line options)
auto help = std::string{"* Options given/available:\n"};
auto args = Arguments<T>{};
args.platform_id = GetArgument(argc, argv, help, kArgPlatform, size_t{0});
args.device_id = GetArgument(argc, argv, help, kArgDevice, size_t{0});
args.precision = GetArgument(argc, argv, help, kArgPrecision, Precision::kSingle);
for (auto &o: C::GetOptions()) {
if (o == kArgM) { args.m = GetArgument(argc, argv, help, kArgM, C::DefaultM()); }
if (o == kArgN) { args.n = GetArgument(argc, argv, help, kArgN, C::DefaultN()); }
if (o == kArgK) { args.k = GetArgument(argc, argv, help, kArgK, C::DefaultK()); }
if (o == kArgAlpha) { args.alpha = GetArgument(argc, argv, help, kArgAlpha, GetScalar<T>()); }
if (o == kArgBeta) { args.beta = GetArgument(argc, argv, help, kArgBeta, GetScalar<T>()); }
if (o == kArgFraction) { args.fraction = GetArgument(argc, argv, help, kArgFraction, C::DefaultFraction()); }
}
fprintf(stdout, "%s\n", help.c_str());
// Tuner for vector-vector input
template <typename T>
void TunerXY(int argc, char* argv[], const Tuner2<T> &tune_function);
// Tests validity of the given arguments
C::TestValidArguments(args);
// Tuner for matrix-vector-vector input
template <typename T>
void TunerAXY(int argc, char* argv[], const size_t num_variations, const Tuner3V<T> &tune_function);
// Tests for validity of the precision
{
auto platform = Platform(args.platform_id);
auto device = Device(platform, args.device_id);
if (!PrecisionSupported<T>(device)) {
printf("* Unsupported precision, skipping this tuning run\n\n");
return;
}
}
// Tuner for matrix-matrix input
template <typename T>
void TunerAB(int argc, char* argv[], const Tuner2<T> &tune_function);
// Creates input buffers with random data
auto x_vec = std::vector<T>(C::GetSizeX(args));
auto y_vec = std::vector<T>(C::GetSizeY(args));
auto a_mat = std::vector<T>(C::GetSizeA(args));
auto b_mat = std::vector<T>(C::GetSizeB(args));
auto c_mat = std::vector<T>(C::GetSizeC(args));
PopulateVector(x_vec);
PopulateVector(y_vec);
PopulateVector(a_mat);
PopulateVector(b_mat);
PopulateVector(c_mat);
// Tuner for matrix-matrix-matrix input
template <typename T>
void TunerABC(int argc, char* argv[], const Tuner3<T> &tune_function);
// Initializes the tuner for the chosen device
cltune::Tuner tuner(args.platform_id, args.device_id);
// Uses full-search to explore all parameter combinations, or random-search to explore only a
// fraction of the parameter space. The fraction is set as a command-line argument; for example, a
// fraction of 16 samples roughly 1 in every 16 parameter combinations.
if (args.fraction == 1.0 || args.fraction == 0.0) {
tuner.UseFullSearch();
}
else {
tuner.UseRandomSearch(1.0/args.fraction);
}
// Loads the kernel sources and defines the kernel to tune
auto sources = C::GetSources();
auto id = tuner.AddKernelFromString(sources, C::KernelName(), C::GlobalSize(args), C::LocalSize());
tuner.SetReferenceFromString(sources, C::KernelName(), C::GlobalSize(args), C::LocalSizeRef());
// Sets the tunable parameters and their possible values
C::SetParameters(tuner, id);
C::SetConstraints(tuner, id);
C::SetLocalMemorySize(tuner, id, args);
// Tests for a specific precision
tuner.AddParameter(id, "PRECISION", {static_cast<size_t>(args.precision)});
tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
// Modifies the thread-sizes (both global and local) based on the parameters
for (auto &parameters: C::MulLocal()) { tuner.MulLocalSize(id, parameters); }
for (auto &parameters: C::DivLocal()) { tuner.DivLocalSize(id, parameters); }
for (auto &parameters: C::MulGlobal()) { tuner.MulGlobalSize(id, parameters); }
for (auto &parameters: C::DivGlobal()) { tuner.DivGlobalSize(id, parameters); }
// Sets the function's arguments
C::SetArguments(tuner, args, x_vec, y_vec, a_mat, b_mat, c_mat);
// Starts the tuning process
tuner.Tune();
// Prints the results to screen
auto time_ms = tuner.PrintToScreen();
tuner.PrintFormatted();
// Also prints the performance of the best-case in terms of GB/s or GFLOPS
if (time_ms != 0.0) {
printf("[ -------> ] %.1lf ms", time_ms);
printf(" or %.1lf %s\n", C::GetMetric(args)/(time_ms*1.0e6), C::PerformanceUnit().c_str());
}
// Outputs the results as JSON to disk, including some meta-data
auto precision_string = std::to_string(static_cast<size_t>(args.precision));
auto metadata = std::vector<std::pair<std::string,std::string>>{
{"kernel_family", C::KernelFamily()},
{"precision", precision_string}
};
for (auto &o: C::GetOptions()) {
if (o == kArgM) { metadata.push_back({"arg_m", std::to_string(args.m)}); }
if (o == kArgN) { metadata.push_back({"arg_n", std::to_string(args.n)}); }
if (o == kArgK) { metadata.push_back({"arg_k", std::to_string(args.k)}); }
}
tuner.PrintJSON("clblast_"+C::KernelFamily()+"_"+precision_string+".json", metadata);
}
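For reference, the kernel-settings class passed as the 'C' template argument is expected to expose the following static interface. This is an editorial sketch, not part of the diff: the class name TuneExample is hypothetical, and the member list simply summarises the calls made by the Tuner function above (the concrete classes TuneCopy, TunePad, TunePadTranspose, TuneTranspose and TuneXaxpy below implement it).
// Sketch of the static interface expected by Tuner<C,T>; TuneExample is a placeholder name.
template <typename T>
class TuneExample {
 public:
  // Identifies the kernel family, the representative kernel, and its OpenCL source
  static std::string KernelFamily();
  static std::string KernelName();
  static std::string GetSources();
  // Declares the relevant command-line options, their defaults, and argument validation
  static std::vector<std::string> GetOptions();
  static void TestValidArguments(const Arguments<T> &args);
  static size_t DefaultM();
  static size_t DefaultN();
  static size_t DefaultK();
  static double DefaultFraction();
  // Describes the sizes of the X/Y vectors and A/B/C matrices
  static size_t GetSizeX(const Arguments<T> &args);
  static size_t GetSizeY(const Arguments<T> &args);
  static size_t GetSizeA(const Arguments<T> &args);
  static size_t GetSizeB(const Arguments<T> &args);
  static size_t GetSizeC(const Arguments<T> &args);
  // Sets the tuning parameters, constraints, and local-memory usage
  static void SetParameters(cltune::Tuner &tuner, const size_t id);
  static void SetConstraints(cltune::Tuner &tuner, const size_t id);
  static void SetLocalMemorySize(cltune::Tuner &tuner, const size_t id, const Arguments<T> &args);
  // Defines the base thread configuration and its parameter-based transformations
  static std::vector<size_t> GlobalSize(const Arguments<T> &args);
  static std::vector<size_t> LocalSize();
  static std::vector<size_t> LocalSizeRef();
  using TransformVector = std::vector<std::vector<std::string>>;
  static TransformVector MulLocal();
  static TransformVector DivLocal();
  static TransformVector MulGlobal();
  static TransformVector DivGlobal();
  // Sets the kernel arguments and describes the performance metric
  static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
                           std::vector<T> &x_vec, std::vector<T> &y_vec,
                           std::vector<T> &a_mat, std::vector<T> &b_mat, std::vector<T> &c_mat);
  static size_t GetMetric(const Arguments<T> &args);
  static std::string PerformanceUnit();
};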
// =================================================================================================
} // namespace clblast


@@ -197,6 +197,12 @@ bool IsMultiple(const size_t a, const size_t b);
// Convert the precision enum into bytes, e.g. a double takes up 8 bytes
size_t GetBytes(const Precision precision);
// =================================================================================================
// Returns false if this precision is not supported by the device
template <typename T>
bool PrecisionSupported(const Device &device);
// =================================================================================================
} // namespace clblast

src/database.py (new file, 208 lines)

@@ -0,0 +1,208 @@
#!/usr/bin/env python
# ==================================================================================================
# This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
# project loosely follows the Google C++ styleguide and uses a max-width of 100 characters per line.
#
# Author(s):
# Cedric Nugteren <www.cedricnugteren.nl>
#
# ==================================================================================================
# System modules
import sys
import os.path
import glob
import re
import json
# Additional modules
import pandas as pd
# Constants
ATTRIBUTES = ["device", "type", "vendor", "precision", "kernel_family", "arg_m", "arg_n", "arg_k"]
# Pandas options
pd.set_option('display.width', 1000)
# ==================================================================================================
# Database operations
# ==================================================================================================
# Loads the database from disk
def LoadDatabase(filename):
return pd.read_pickle(filename)
# Saves the database to disk
def SaveDatabase(df, filename):
df.to_pickle(filename)
# Loads JSON data from file
def ImportDataFromFile(filename):
with open(filename) as f:
data = json.load(f)
json_data = pd.DataFrame(data)
df = pd.io.json.json_normalize(json_data["results"])
for attribute in ATTRIBUTES:
if attribute == "kernel_family":
df[attribute] = re.sub(r'_\d+', '', data[attribute])
elif attribute in data:
df[attribute] = data[attribute]
else:
df[attribute] = 0
return df
# Returns the row-wise concatenation of two dataframes
def ConcatenateData(df1, df2):
return pd.concat([df1, df2])
# Removes duplicates from a dataframe
def RemoveDuplicates(df):
return df.drop_duplicates()
# Returns the best-performing (lowest-time) result for each combination of attributes and kernel
def GetBestResults(df):
dfbest = pd.DataFrame()
grouped = df.groupby(ATTRIBUTES+["kernel"])
for name, dfgroup in grouped:
bestcase = dfgroup.loc[[dfgroup["time"].idxmin()]]
dfbest = ConcatenateData(dfbest, bestcase)
return dfbest
# ==================================================================================================
# C++ header generation
# ==================================================================================================
# The C++ header
def GetHeader(family):
return("""
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Database generator <database.py>
//
// This file populates the database with best-found tuning parameters for the '%s' kernels.
//
// =================================================================================================
namespace clblast {
// ================================================================================================="""
% family.title())
# The C++ footer
def GetFooter():
return("\n} // namespace clblast\n")
# The start of a new C++ precision entry
def GetPrecision(family, precision):
precisionstring = "Single"
if precision == "64":
precisionstring = "Double"
elif precision == "3232":
precisionstring = "ComplexSingle"
elif precision == "6464":
precisionstring = "ComplexDouble"
return("\n\nconst Database::DatabaseEntry Database::%s%s = {\n \"%s\", Precision::k%s, {\n"
% (family.title(), precisionstring, family.title(), precisionstring))
# The C++ device type and vendor
def GetDeviceVendor(vendor, devtype):
return(" { // %s %ss\n kDeviceType%s, kDeviceVendor%s, {\n"
% (vendor, devtype, devtype, vendor))
# Prints the data to a C++ database
def PrintData(df):
# Iterates over the kernel families: creates a new file per family
for family, dffamily in df.groupby(["kernel_family"]):
dffamily = dffamily.dropna(axis=1, how='all')
f = open(family+'.h', 'w+')
f.write(GetHeader(family))
# Loops over the different entries for this family and prints their headers
for precision, dfprecision in dffamily.groupby(["precision"]):
f.write(GetPrecision(family, precision))
for vendor, dfvendor in dfprecision.groupby(["vendor"]):
for devtype, dfdevtype in dfvendor.groupby(["type"]):
f.write(GetDeviceVendor(vendor, devtype))
for device, dfdevice in dfdevtype.groupby(["device"]):
devicename = "\"%s\"," % device
f.write(" { %-20s { " % devicename)
# Collects the parameters for this case and prints them
parameters = []
for kernel, dfkernel in dfdevice.groupby(["kernel"]):
dfkernel = dfkernel.dropna(axis=1)
col_names = [col for col in list(dfkernel) if col.startswith('parameters.') and col != "parameters.PRECISION"]
parameters += ["{\"%s\",%d}" % (p.replace("parameters.",""), dfkernel[p].iloc[0]) for p in col_names]
f.write(", ".join(parameters))
f.write(" } },\n")
# Prints the footers
f.write(" }\n },\n")
f.write(" }\n};\n\n// =================================================================================================")
f.write(GetFooter())
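As an illustration of the output of PrintData, a generated entry for the 'copy' family in single precision would look roughly as follows. This is an editorial sketch based on the format strings above: the vendor, device name, and parameter values are placeholders rather than results from an actual tuning run, and the indentation follows the f.write calls only approximately.
// Illustrative generated database entry; "GeForce GTX 480" and all parameter values are placeholders.
const Database::DatabaseEntry Database::CopySingle = {
  "Copy", Precision::kSingle, {
    { // NVIDIA GPUs
      kDeviceTypeGPU, kDeviceVendorNVIDIA, {
        { "GeForce GTX 480", { {"COPY_DIMX",16}, {"COPY_DIMY",8}, {"COPY_VW",4}, {"COPY_WPT",1} } },
      }
    },
  }
};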
# ==================================================================================================
# Command-line arguments parsing and verification
# ==================================================================================================
# Checks for the number of command-line arguments
if len(sys.argv) != 3:
print "[ERROR] Usage: database.py <folder_with_json_files> <root_of_clblast>"
sys.exit()
# Parses the command-line arguments
path_json = sys.argv[1]
path_clblast = sys.argv[2]
file_db = path_clblast+"/src/database.db"
glob_json = path_json+"/*.json"
# Checks whether the command-line arguments are valid; exits otherwise
clblast_h = path_clblast+"/include/clblast.h" # Not used but just for validation
if not os.path.isfile(clblast_h):
print "[ERROR] The path '"+path_clblast+"' does not point to the root of the CLBlast library"
sys.exit()
if len(glob.glob(glob_json)) < 1:
print "[ERROR] The path '"+path_json+"' does not contain any JSON files"
sys.exit()
# ==================================================================================================
# The main body of the script
# ==================================================================================================
# Loads the database if it exists. If not, a new database is initialized
db_exists = os.path.isfile(file_db)
database = LoadDatabase(file_db) if db_exists else pd.DataFrame()
# Loops over all JSON files in the supplied folder
for file_json in glob.glob(glob_json):
# Loads the newly imported data
print "## Processing '"+file_json+"'",
imported_data = ImportDataFromFile(file_json)
# Adds the new data to the database
old_size = len(database.index)
database = ConcatenateData(database, imported_data)
database = RemoveDuplicates(database)
new_size = len(database.index)
print "with "+str(new_size-old_size)+" new items"
# Stores the new database back to disk
SaveDatabase(database, file_db)
# Retrieves the best performing results
bests = GetBestResults(database)
# TODO: Determines the defaults for other vendors and per vendor
#defaults = CalculateDefaults(bests)
#bests = ConcatenateData(bests, defaults)
# Outputs the data as a C++ database
PrintData(bests)
# ==================================================================================================


@@ -7,13 +7,12 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements an auto-tuner to tune the copy OpenCL kernels. It uses CLTune.
// This file uses the CLTune auto-tuner to tune the copy OpenCL kernels.
//
// =================================================================================================
#include <string>
#include <vector>
#include <stdexcept>
#include "internal/utilities.h"
#include "internal/tuning.h"
@@ -21,61 +20,96 @@
namespace clblast {
// =================================================================================================
// The copy auto-tuner
// See comment at top of file for a description of the class
template <typename T>
void CopyTune(const Arguments<T> &args,
const std::vector<T> &a_mat, std::vector<T> &b_mat,
cltune::Tuner &tuner) {
class TuneCopy {
public:
// This points to the CopyMatrix kernel as found in the CLBlast library. This is just one example
// of a copy kernel. However, all copy-kernels use the same tuning parameters, so one has to be
// chosen as a representative.
std::string sources =
#include "../src/kernels/common.opencl"
#include "../src/kernels/copy.opencl"
;
auto id = tuner.AddKernelFromString(sources, "CopyMatrix", {args.m, args.n}, {1, 1});
tuner.SetReferenceFromString(sources, "CopyMatrix", {args.m, args.n}, {8, 8});
// Sets the tunable parameters and their possible values
tuner.AddParameter(id, "COPY_DIMX", {8, 16, 32});
tuner.AddParameter(id, "COPY_DIMY", {8, 16, 32});
tuner.AddParameter(id, "COPY_WPT", {1, 2, 4, 8});
tuner.AddParameter(id, "COPY_VW", {1, 2, 4, 8});
// Tests for a specific precision
tuner.AddParameter(id, "PRECISION", {static_cast<size_t>(args.precision)});
tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
// Modifies the thread-sizes (both global and local) based on the parameters
tuner.MulLocalSize(id, {"COPY_DIMX", "COPY_DIMY"});
tuner.DivGlobalSize(id, {"COPY_VW", "COPY_WPT"});
// Sets the function's arguments
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentInput(a_mat);
tuner.AddArgumentOutput(b_mat);
}
// =================================================================================================
// Main function which calls the common client code with the routine-specific function as argument.
void TunerCopy(int argc, char *argv[]) {
switch(GetPrecision(argc, argv)) {
case Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
case Precision::kSingle: TunerAB<float>(argc, argv, CopyTune<float>); break;
case Precision::kDouble: TunerAB<double>(argc, argv, CopyTune<double>); break;
case Precision::kComplexSingle: TunerAB<float2>(argc, argv, CopyTune<float2>); break;
case Precision::kComplexDouble: TunerAB<double2>(argc, argv, CopyTune<double2>); break;
// The representative kernel and the source code
static std::string KernelFamily() { return "copy"; }
static std::string KernelName() { return "CopyMatrix"; }
static std::string GetSources() {
return
#include "../src/kernels/common.opencl"
#include "../src/kernels/copy.opencl"
;
}
}
// The list of arguments relevant for this routine
static std::vector<std::string> GetOptions() { return {kArgM, kArgN}; }
// Tests for valid arguments
static void TestValidArguments(const Arguments<T> &) { }
// Sets the default values for the arguments
static size_t DefaultM() { return 1024; }
static size_t DefaultN() { return 1024; }
static size_t DefaultK() { return 1; } // N/A for this kernel
static double DefaultFraction() { return 1.0; } // N/A for this kernel
// Describes how to obtain the sizes of the buffers
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
static size_t GetSizeY(const Arguments<T> &) { return 1; } // N/A for this kernel
static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.n; }
static size_t GetSizeB(const Arguments<T> &args) { return args.m * args.n; }
static size_t GetSizeC(const Arguments<T> &) { return 1; } // N/A for this kernel
// Sets the tuning parameters and their possible values
static void SetParameters(cltune::Tuner &tuner, const size_t id) {
tuner.AddParameter(id, "COPY_DIMX", {8, 16, 32});
tuner.AddParameter(id, "COPY_DIMY", {8, 16, 32});
tuner.AddParameter(id, "COPY_WPT", {1, 2, 4, 8});
tuner.AddParameter(id, "COPY_VW", {1, 2, 4, 8});
}
// Sets the constraints and local memory size
static void SetConstraints(cltune::Tuner &, const size_t) { }
static void SetLocalMemorySize(cltune::Tuner &, const size_t, const Arguments<T> &) { }
// Sets the base thread configuration
static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m, args.n}; }
static std::vector<size_t> LocalSize() { return {1, 1}; }
static std::vector<size_t> LocalSizeRef() { return {8, 8}; }
// Transforms the thread configuration based on the parameters
using TransformVector = std::vector<std::vector<std::string>>;
static TransformVector MulLocal() { return {{"COPY_DIMX", "COPY_DIMY"}}; }
static TransformVector DivLocal() { return {}; }
static TransformVector MulGlobal() { return {}; }
static TransformVector DivGlobal() { return {{"COPY_VW", "COPY_WPT"}}; }
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &, std::vector<T> &,
std::vector<T> &a_mat, std::vector<T> &b_mat, std::vector<T> &) {
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentInput(a_mat);
tuner.AddArgumentOutput(b_mat);
}
// Describes how to compute the performance metrics
static size_t GetMetric(const Arguments<T> &args) {
return 2 * args.m * args.n * GetBytes(args.precision);
}
static std::string PerformanceUnit() { return "GB/s"; }
};
// =================================================================================================
} // namespace clblast
// Shortcuts to the clblast namespace
using float2 = clblast::float2;
using double2 = clblast::double2;
// Main function (not within the clblast namespace)
int main(int argc, char *argv[]) {
clblast::TunerCopy(argc, argv);
switch(clblast::GetPrecision(argc, argv)) {
case clblast::Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
case clblast::Precision::kSingle: clblast::Tuner<clblast::TuneCopy<float>, float>(argc, argv); break;
case clblast::Precision::kDouble: clblast::Tuner<clblast::TuneCopy<double>, double>(argc, argv); break;
case clblast::Precision::kComplexSingle: clblast::Tuner<clblast::TuneCopy<float2>, float2>(argc, argv); break;
case clblast::Precision::kComplexDouble: clblast::Tuner<clblast::TuneCopy<double2>, double2>(argc, argv); break;
}
return 0;
}


@@ -7,13 +7,12 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements an auto-tuner to tune the pad-copy OpenCL kernels. It uses CLTune.
// This file uses the CLTune auto-tuner to tune the pad OpenCL kernels.
//
// =================================================================================================
#include <string>
#include <vector>
#include <stdexcept>
#include "internal/utilities.h"
#include "internal/tuning.h"
@@ -21,37 +20,68 @@
namespace clblast {
// =================================================================================================
// The pad auto-tuner
// See comment at top of file for a description of the class
template <typename T>
void PadTune(const Arguments<T> &args,
const std::vector<T> &a_mat, std::vector<T> &b_mat,
cltune::Tuner &tuner) {
class TunePad {
public:
// This points to the PadMatrix kernel as found in the CLBlast library. This is just one
// example of a pad kernel. However, all pad-kernels use the same tuning parameters, so one has
// to be chosen as a representative.
std::string sources =
#include "../src/kernels/common.opencl"
#include "../src/kernels/pad.opencl"
;
auto id = tuner.AddKernelFromString(sources, "PadMatrix", {args.m, args.n}, {1, 1});
tuner.SetReferenceFromString(sources, "PadMatrix", {args.m, args.n}, {8, 8});
// The representative kernel and the source code
static std::string KernelFamily() { return "pad"; }
static std::string KernelName() { return "PadMatrix"; }
static std::string GetSources() {
return
#include "../src/kernels/common.opencl"
#include "../src/kernels/pad.opencl"
;
}
// Sets the tunable parameters and their possible values
tuner.AddParameter(id, "PAD_DIMX", {8, 16, 32});
tuner.AddParameter(id, "PAD_DIMY", {8, 16, 32});
tuner.AddParameter(id, "PAD_WPTX", {1, 2, 4});
tuner.AddParameter(id, "PAD_WPTY", {1, 2, 4});
// The list of arguments relevant for this routine
static std::vector<std::string> GetOptions() { return {kArgM, kArgN}; }
// Tests for a specific precision
tuner.AddParameter(id, "PRECISION", {static_cast<size_t>(args.precision)});
tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
// Tests for valid arguments
static void TestValidArguments(const Arguments<T> &) { }
// Modifies the thread-sizes (both global and local) based on the parameters
tuner.MulLocalSize(id, {"PAD_DIMX", "PAD_DIMY"});
tuner.DivGlobalSize(id, {"PAD_WPTX", "PAD_WPTY"});
// Sets the default values for the arguments
static size_t DefaultM() { return 1024; }
static size_t DefaultN() { return 1024; }
static size_t DefaultK() { return 1; } // N/A for this kernel
static double DefaultFraction() { return 1.0; } // N/A for this kernel
// Sets the function's arguments
// Describes how to obtain the sizes of the buffers
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
static size_t GetSizeY(const Arguments<T> &) { return 1; } // N/A for this kernel
static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.n; }
static size_t GetSizeB(const Arguments<T> &args) { return args.m * args.n; }
static size_t GetSizeC(const Arguments<T> &) { return 1; } // N/A for this kernel
// Sets the tuning parameters and their possible values
static void SetParameters(cltune::Tuner &tuner, const size_t id) {
tuner.AddParameter(id, "PAD_DIMX", {8, 16, 32});
tuner.AddParameter(id, "PAD_DIMY", {8, 16, 32});
tuner.AddParameter(id, "PAD_WPTX", {1, 2, 4});
tuner.AddParameter(id, "PAD_WPTY", {1, 2, 4});
}
// Sets the constraints and local memory size
static void SetConstraints(cltune::Tuner &, const size_t) { }
static void SetLocalMemorySize(cltune::Tuner &, const size_t, const Arguments<T> &) { }
// Sets the base thread configuration
static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m, args.n}; }
static std::vector<size_t> LocalSize() { return {1, 1}; }
static std::vector<size_t> LocalSizeRef() { return {8, 8}; }
// Transforms the thread configuration based on the parameters
using TransformVector = std::vector<std::vector<std::string>>;
static TransformVector MulLocal() { return {{"PAD_DIMX", "PAD_DIMY"}}; }
static TransformVector DivLocal() { return {}; }
static TransformVector MulGlobal() { return {}; }
static TransformVector DivGlobal() { return {{"PAD_WPTX", "PAD_WPTY"}}; }
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &, std::vector<T> &,
std::vector<T> &a_mat, std::vector<T> &b_mat, std::vector<T> &) {
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentScalar(static_cast<int>(args.n));
tuner.AddArgumentScalar(static_cast<int>(args.m));
@@ -63,27 +93,31 @@ void PadTune(const Arguments<T> &args,
tuner.AddArgumentScalar(0);
tuner.AddArgumentOutput(b_mat);
tuner.AddArgumentScalar(0);
}
// =================================================================================================
// Main function which calls the common client code with the routine-specific function as argument.
void TunerPad(int argc, char *argv[]) {
switch(GetPrecision(argc, argv)) {
case Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
case Precision::kSingle: TunerAB<float>(argc, argv, PadTune<float>); break;
case Precision::kDouble: TunerAB<double>(argc, argv, PadTune<double>); break;
case Precision::kComplexSingle: TunerAB<float2>(argc, argv, PadTune<float2>); break;
case Precision::kComplexDouble: TunerAB<double2>(argc, argv, PadTune<double2>); break;
}
}
// Describes how to compute the performance metrics
static size_t GetMetric(const Arguments<T> &args) {
return 2 * args.m * args.n * GetBytes(args.precision);
}
static std::string PerformanceUnit() { return "GB/s"; }
};
// =================================================================================================
} // namespace clblast
// Shortcuts to the clblast namespace
using float2 = clblast::float2;
using double2 = clblast::double2;
// Main function (not within the clblast namespace)
int main(int argc, char *argv[]) {
clblast::TunerPad(argc, argv);
switch(clblast::GetPrecision(argc, argv)) {
case clblast::Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
case clblast::Precision::kSingle: clblast::Tuner<clblast::TunePad<float>, float>(argc, argv); break;
case clblast::Precision::kDouble: clblast::Tuner<clblast::TunePad<double>, double>(argc, argv); break;
case clblast::Precision::kComplexSingle: clblast::Tuner<clblast::TunePad<float2>, float2>(argc, argv); break;
case clblast::Precision::kComplexDouble: clblast::Tuner<clblast::TunePad<double2>, double2>(argc, argv); break;
}
return 0;
}


@@ -7,13 +7,12 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements an auto-tuner to tune the pad-transpose OpenCL kernels. It uses CLTune.
// This file uses the CLTune auto-tuner to tune the padtranspose OpenCL kernels.
//
// =================================================================================================
#include <string>
#include <vector>
#include <stdexcept>
#include "internal/utilities.h"
#include "internal/tuning.h"
@@ -21,74 +20,108 @@
namespace clblast {
// =================================================================================================
// The transpose auto-tuner
// See comment at top of file for a description of the class
template <typename T>
void PadTransposeTune(const Arguments<T> &args,
const std::vector<T> &a_mat, std::vector<T> &b_mat,
cltune::Tuner &tuner) {
class TunePadTranspose {
public:
// This points to the PadTransposeMatrix kernel as found in the CLBlast library. This is just one
// example of a transpose kernel. However, all kernels use the same tuning parameters, so one has
// to be chosen as a representative.
std::string sources =
#include "../src/kernels/common.opencl"
#include "../src/kernels/padtranspose.opencl"
;
auto id = tuner.AddKernelFromString(sources, "PadTransposeMatrix", {args.m, args.n}, {1, 1});
tuner.SetReferenceFromString(sources, "PadTransposeMatrix", {args.m, args.n}, {8, 8});
// Sets the tunable parameters and their possible values
tuner.AddParameter(id, "PADTRA_TILE", {8, 16, 32, 64});
tuner.AddParameter(id, "PADTRA_WPT", {1, 2, 4, 8, 16});
tuner.AddParameter(id, "PADTRA_PAD", {0, 1});
// Tests for a specific precision
tuner.AddParameter(id, "PRECISION", {static_cast<size_t>(args.precision)});
tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
// Sets the constraints for local memory size limitations
auto LocalMemorySize = [args] (std::vector<size_t> v) {
return ((v[0]*v[1]*(v[0]*v[1]+v[2]))*GetBytes(args.precision));
};
tuner.SetLocalMemoryUsage(id, LocalMemorySize, {"PADTRA_TILE", "PADTRA_WPT", "PADTRA_PAD"});
// Modifies the thread-sizes (both global and local) based on the parameters
tuner.DivGlobalSize(id, {"PADTRA_WPT", "PADTRA_WPT"});
tuner.MulLocalSize(id, {"PADTRA_TILE", "PADTRA_TILE"});
// Sets the function's arguments
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentScalar(static_cast<int>(args.n));
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentScalar(0);
tuner.AddArgumentInput(a_mat);
tuner.AddArgumentScalar(static_cast<int>(args.n));
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentScalar(static_cast<int>(args.n));
tuner.AddArgumentScalar(0);
tuner.AddArgumentOutput(b_mat);
tuner.AddArgumentScalar(0);
}
// =================================================================================================
// Main function which calls the common client code with the routine-specific function as argument.
void TunerPadTranspose(int argc, char *argv[]) {
switch(GetPrecision(argc, argv)) {
case Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
case Precision::kSingle: TunerAB<float>(argc, argv, PadTransposeTune<float>); break;
case Precision::kDouble: TunerAB<double>(argc, argv, PadTransposeTune<double>); break;
case Precision::kComplexSingle: TunerAB<float2>(argc, argv, PadTransposeTune<float2>); break;
case Precision::kComplexDouble: TunerAB<double2>(argc, argv, PadTransposeTune<double2>); break;
// The representative kernel and the source code
static std::string KernelFamily() { return "padtranspose"; }
static std::string KernelName() { return "PadTransposeMatrix"; }
static std::string GetSources() {
return
#include "../src/kernels/common.opencl"
#include "../src/kernels/padtranspose.opencl"
;
}
}
// The list of arguments relevant for this routine
static std::vector<std::string> GetOptions() { return {kArgM, kArgN}; }
// Tests for valid arguments
static void TestValidArguments(const Arguments<T> &) { }
// Sets the default values for the arguments
static size_t DefaultM() { return 1024; }
static size_t DefaultN() { return 1024; }
static size_t DefaultK() { return 1; } // N/A for this kernel
static double DefaultFraction() { return 1.0; } // N/A for this kernel
// Describes how to obtain the sizes of the buffers
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
static size_t GetSizeY(const Arguments<T> &) { return 1; } // N/A for this kernel
static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.n; }
static size_t GetSizeB(const Arguments<T> &args) { return args.m * args.n; }
static size_t GetSizeC(const Arguments<T> &) { return 1; } // N/A for this kernel
// Sets the tuning parameters and their possible values
static void SetParameters(cltune::Tuner &tuner, const size_t id) {
tuner.AddParameter(id, "PADTRA_TILE", {8, 16, 32, 64});
tuner.AddParameter(id, "PADTRA_WPT", {1, 2, 4, 8, 16});
tuner.AddParameter(id, "PADTRA_PAD", {0, 1});
}
// Sets the constraints and local memory size
static void SetConstraints(cltune::Tuner &, const size_t) { }
static void SetLocalMemorySize(cltune::Tuner &tuner, const size_t id, const Arguments<T> &args) {
auto LocalMemorySize = [args] (std::vector<size_t> v) {
return ((v[0]*v[1]*(v[0]*v[1]+v[2]))*GetBytes(args.precision));
};
tuner.SetLocalMemoryUsage(id, LocalMemorySize, {"PADTRA_TILE", "PADTRA_WPT", "PADTRA_PAD"});
}
// Sets the base thread configuration
static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m, args.n}; }
static std::vector<size_t> LocalSize() { return {1, 1}; }
static std::vector<size_t> LocalSizeRef() { return {8, 8}; }
// Transforms the thread configuration based on the parameters
using TransformVector = std::vector<std::vector<std::string>>;
static TransformVector MulLocal() { return {{"PADTRA_TILE", "PADTRA_TILE"}}; }
static TransformVector DivLocal() { return {}; }
static TransformVector MulGlobal() { return {}; }
static TransformVector DivGlobal() { return {{"PADTRA_WPT", "PADTRA_WPT"}}; }
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &, std::vector<T> &,
std::vector<T> &a_mat, std::vector<T> &b_mat, std::vector<T> &) {
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentScalar(static_cast<int>(args.n));
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentScalar(0);
tuner.AddArgumentInput(a_mat);
tuner.AddArgumentScalar(static_cast<int>(args.n));
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentScalar(static_cast<int>(args.n));
tuner.AddArgumentScalar(0);
tuner.AddArgumentOutput(b_mat);
tuner.AddArgumentScalar(0);
}
// Describes how to compute the performance metrics
static size_t GetMetric(const Arguments<T> &args) {
return 2 * args.m * args.n * GetBytes(args.precision);
}
static std::string PerformanceUnit() { return "GB/s"; }
};
// =================================================================================================
} // namespace clblast
// Shortcuts to the clblast namespace
using float2 = clblast::float2;
using double2 = clblast::double2;
// Main function (not within the clblast namespace)
int main(int argc, char *argv[]) {
clblast::TunerPadTranspose(argc, argv);
switch(clblast::GetPrecision(argc, argv)) {
case clblast::Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
case clblast::Precision::kSingle: clblast::Tuner<clblast::TunePadTranspose<float>, float>(argc, argv); break;
case clblast::Precision::kDouble: clblast::Tuner<clblast::TunePadTranspose<double>, double>(argc, argv); break;
case clblast::Precision::kComplexSingle: clblast::Tuner<clblast::TunePadTranspose<float2>, float2>(argc, argv); break;
case clblast::Precision::kComplexDouble: clblast::Tuner<clblast::TunePadTranspose<double2>, double2>(argc, argv); break;
}
return 0;
}


@@ -7,13 +7,12 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements an auto-tuner to tune the transpose OpenCL kernels. It uses CLTune.
// This file uses the CLTune auto-tuner to tune the transpose OpenCL kernels.
//
// =================================================================================================
#include <string>
#include <vector>
#include <stdexcept>
#include "internal/utilities.h"
#include "internal/tuning.h"
@@ -21,67 +20,101 @@
namespace clblast {
// =================================================================================================
// The transpose auto-tuner
// See comment at top of file for a description of the class
template <typename T>
void TransposeTune(const Arguments<T> &args,
const std::vector<T> &a_mat, std::vector<T> &b_mat,
cltune::Tuner &tuner) {
class TuneTranspose {
public:
// This points to the TransposeMatrix kernel as found in the CLBlast library. This is just one
// example of a transpose kernel. However, all kernels use the same tuning parameters, so one has
// to be chosen as a representative.
std::string sources =
#include "../src/kernels/common.opencl"
#include "../src/kernels/transpose.opencl"
;
auto id = tuner.AddKernelFromString(sources, "TransposeMatrix", {args.m, args.n}, {1, 1});
tuner.SetReferenceFromString(sources, "TransposeMatrix", {args.m, args.n}, {8, 8});
// Sets the tunable parameters and their possible values
tuner.AddParameter(id, "TRA_DIM", {4, 8, 16, 32, 64});
tuner.AddParameter(id, "TRA_WPT", {1, 2, 4, 8, 16});
tuner.AddParameter(id, "TRA_PAD", {0, 1});
tuner.AddParameter(id, "TRA_SHUFFLE", {0, 1});
// Tests for a specific precision
tuner.AddParameter(id, "PRECISION", {static_cast<size_t>(args.precision)});
tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
// Sets the constraints for local memory size limitations
auto LocalMemorySize = [args] (std::vector<size_t> v) {
return ((v[0]*v[1]*(v[0]*v[1]+v[2]))*GetBytes(args.precision));
};
tuner.SetLocalMemoryUsage(id, LocalMemorySize, {"TRA_DIM", "TRA_WPT", "TRA_PAD"});
// Modifies the thread-sizes (both global and local) based on the parameters
tuner.DivGlobalSize(id, {"TRA_WPT", "TRA_WPT"});
tuner.MulLocalSize(id, {"TRA_DIM", "TRA_DIM"});
// Sets the function's arguments
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentInput(a_mat);
tuner.AddArgumentOutput(b_mat);
}
// =================================================================================================
// Main function which calls the common client code with the routine-specific function as argument.
void TunerTranspose(int argc, char *argv[]) {
switch(GetPrecision(argc, argv)) {
case Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
case Precision::kSingle: TunerAB<float>(argc, argv, TransposeTune<float>); break;
case Precision::kDouble: TunerAB<double>(argc, argv, TransposeTune<double>); break;
case Precision::kComplexSingle: TunerAB<float2>(argc, argv, TransposeTune<float2>); break;
case Precision::kComplexDouble: TunerAB<double2>(argc, argv, TransposeTune<double2>); break;
// The representative kernel and the source code
static std::string KernelFamily() { return "transpose"; }
static std::string KernelName() { return "TransposeMatrix"; }
static std::string GetSources() {
return
#include "../src/kernels/common.opencl"
#include "../src/kernels/transpose.opencl"
;
}
}
// The list of arguments relevant for this routine
static std::vector<std::string> GetOptions() { return {kArgM, kArgN}; }
// Tests for valid arguments
static void TestValidArguments(const Arguments<T> &) { }
// Sets the default values for the arguments
static size_t DefaultM() { return 1024; }
static size_t DefaultN() { return 1024; }
static size_t DefaultK() { return 1; } // N/A for this kernel
static double DefaultFraction() { return 1.0; } // N/A for this kernel
// Describes how to obtain the sizes of the buffers
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
static size_t GetSizeY(const Arguments<T> &) { return 1; } // N/A for this kernel
static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.n; }
static size_t GetSizeB(const Arguments<T> &args) { return args.m * args.n; }
static size_t GetSizeC(const Arguments<T> &) { return 1; } // N/A for this kernel
// Sets the tuning parameters and their possible values
static void SetParameters(cltune::Tuner &tuner, const size_t id) {
tuner.AddParameter(id, "TRA_DIM", {4, 8, 16, 32, 64});
tuner.AddParameter(id, "TRA_WPT", {1, 2, 4, 8, 16});
tuner.AddParameter(id, "TRA_PAD", {0, 1});
tuner.AddParameter(id, "TRA_SHUFFLE", {0, 1});
}
// Sets the constraints and local memory size
static void SetConstraints(cltune::Tuner &, const size_t) { }
static void SetLocalMemorySize(cltune::Tuner &tuner, const size_t id, const Arguments<T> &args) {
auto LocalMemorySize = [args] (std::vector<size_t> v) {
return ((v[0]*v[1]*(v[0]*v[1]+v[2]))*GetBytes(args.precision));
};
tuner.SetLocalMemoryUsage(id, LocalMemorySize, {"TRA_DIM", "TRA_WPT", "TRA_PAD"});
}
// Sets the base thread configuration
static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m, args.n}; }
static std::vector<size_t> LocalSize() { return {1, 1}; }
static std::vector<size_t> LocalSizeRef() { return {8, 8}; }
// Transforms the thread configuration based on the parameters
using TransformVector = std::vector<std::vector<std::string>>;
static TransformVector MulLocal() { return {{"TRA_DIM", "TRA_DIM"}}; }
static TransformVector DivLocal() { return {}; }
static TransformVector MulGlobal() { return {}; }
static TransformVector DivGlobal() { return {{"TRA_WPT", "TRA_WPT"}}; }
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &, std::vector<T> &,
std::vector<T> &a_mat, std::vector<T> &b_mat, std::vector<T> &) {
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentInput(a_mat);
tuner.AddArgumentOutput(b_mat);
}
// Describes how to compute the performance metrics
static size_t GetMetric(const Arguments<T> &args) {
return 2 * args.m * args.n * GetBytes(args.precision);
}
static std::string PerformanceUnit() { return "GB/s"; }
};
// =================================================================================================
} // namespace clblast
// Shortcuts to the clblast namespace
using float2 = clblast::float2;
using double2 = clblast::double2;
// Main function (not within the clblast namespace)
int main(int argc, char *argv[]) {
clblast::TunerTranspose(argc, argv);
switch(clblast::GetPrecision(argc, argv)) {
case clblast::Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
case clblast::Precision::kSingle: clblast::Tuner<clblast::TuneTranspose<float>, float>(argc, argv); break;
case clblast::Precision::kDouble: clblast::Tuner<clblast::TuneTranspose<double>, double>(argc, argv); break;
case clblast::Precision::kComplexSingle: clblast::Tuner<clblast::TuneTranspose<float2>, float2>(argc, argv); break;
case clblast::Precision::kComplexDouble: clblast::Tuner<clblast::TuneTranspose<double2>, double2>(argc, argv); break;
}
return 0;
}


@@ -1,249 +0,0 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements the common auto-tuning code to interface with the CLTune library.
//
// =================================================================================================
#include <string>
#include <vector>
#include "internal/utilities.h"
#include "internal/tuning.h"
namespace clblast {
// =================================================================================================
// Function to get command-line argument, set-up the input buffers, configure the tuner, and collect
// the results. Used for vector-vector routines.
template <typename T>
void TunerXY(int argc, char* argv[], const Tuner2<T> &tune_function) {
// Sets the parameters and platform/device for which to tune (command-line options)
auto help = std::string{"* Options given/available:\n"};
auto args = Arguments<T>{};
args.platform_id = GetArgument(argc, argv, help, kArgPlatform, size_t{0});
args.device_id = GetArgument(argc, argv, help, kArgDevice, size_t{0});
args.precision = GetArgument(argc, argv, help, kArgPrecision, Precision::kSingle);
args.n = GetArgument(argc, argv, help, kArgN, size_t{4096*1024});
args.alpha = GetArgument(argc, argv, help, kArgAlpha, GetScalar<T>());
fprintf(stdout, "%s\n", help.c_str());
// Creates input buffers with random data
auto x_vec = std::vector<T>(args.n);
auto y_vec = std::vector<T>(args.n);
PopulateVector(x_vec);
PopulateVector(y_vec);
// Initializes the tuner for the chosen device
cltune::Tuner tuner(args.platform_id, args.device_id);
// Use full-search to explore all parameter combinations.
tuner.UseFullSearch();
// Configures the tuning parameters (kernel specific)
tune_function(args, x_vec, y_vec, tuner);
// Starts the tuning process
tuner.Tune();
// Prints the results to screen
auto time_ms = tuner.PrintToScreen();
tuner.PrintFormatted();
// Also prints the performance of the best-case in terms of GB/s
const auto mega_bytes = (3*args.n*GetBytes(args.precision)) * 1.0e-6;
if (time_ms != 0.0) {
printf("[ -------> ] %.1lf ms or %.1lf GB/s\n", time_ms, mega_bytes/time_ms);
}
}
// Compiles the above function
template void TunerXY<float>(int, char**, const Tuner2<float>&);
template void TunerXY<double>(int, char**, const Tuner2<double>&);
template void TunerXY<float2>(int, char**, const Tuner2<float2>&);
template void TunerXY<double2>(int, char**, const Tuner2<double2>&);
// =================================================================================================
// Function to get command-line argument, set-up the input buffers, configure the tuner, and collect
// the results. Used for matrix-vector-vector routines.
template <typename T>
void TunerAXY(int argc, char* argv[], const size_t num_variations,
const Tuner3V<T> &tune_function) {
// Sets the parameters and platform/device for which to tune (command-line options)
auto help = std::string{"* Options given/available:\n"};
auto args = Arguments<T>{};
args.platform_id = GetArgument(argc, argv, help, kArgPlatform, size_t{0});
args.device_id = GetArgument(argc, argv, help, kArgDevice, size_t{0});
args.precision = GetArgument(argc, argv, help, kArgPrecision, Precision::kSingle);
args.m = GetArgument(argc, argv, help, kArgM, size_t{2048});
args.n = GetArgument(argc, argv, help, kArgN, size_t{2048});
args.alpha = GetArgument(argc, argv, help, kArgAlpha, GetScalar<T>());
args.beta = GetArgument(argc, argv, help, kArgBeta, GetScalar<T>());
fprintf(stdout, "%s\n", help.c_str());
// Creates input buffers with random data
auto a_mat = std::vector<T>(args.m * args.n);
auto x_vec = std::vector<T>(args.n);
auto y_vec = std::vector<T>(args.m);
PopulateVector(a_mat);
PopulateVector(x_vec);
PopulateVector(y_vec);
// Loop over the different variations of the kernel
for (auto variation=size_t{1}; variation<=num_variations; ++variation) {
// Initializes the tuner for the chosen device
cltune::Tuner tuner(args.platform_id, args.device_id);
// Use full-search to explore all parameter combinations.
tuner.UseFullSearch();
// Configures the tuning parameters (kernel specific)
tune_function(args, variation, a_mat, x_vec, y_vec, tuner);
// Starts the tuning process
tuner.Tune();
// Prints the results to screen
auto time_ms = tuner.PrintToScreen();
tuner.PrintFormatted();
// Also prints the performance of the best-case in terms of GB/s and GFLOPS
const auto mega_bytes = ((args.m*args.n + 2*args.m + args.n)*GetBytes(args.precision)) * 1.0e-6;
const auto mega_flops = (2*args.m*args.n) * 1.0e-6;
if (time_ms != 0.0) {
printf("[ -------> ] %.1lf ms or %.1lf GB/s or %.1lf GFLOPS\n",
time_ms, mega_bytes/time_ms, mega_flops/time_ms);
}
}
}
// Compiles the above function
template void TunerAXY<float>(int, char**, const size_t, const Tuner3V<float>&);
template void TunerAXY<double>(int, char**, const size_t, const Tuner3V<double>&);
template void TunerAXY<float2>(int, char**, const size_t, const Tuner3V<float2>&);
template void TunerAXY<double2>(int, char**, const size_t, const Tuner3V<double2>&);
// =================================================================================================
// Function to get command-line argument, set-up the input buffers, configure the tuner, and collect
// the results. Used for matrix-matrix routines.
template <typename T>
void TunerAB(int argc, char* argv[], const Tuner2<T> &tune_function) {
// Sets the parameters and platform/device for which to tune (command-line options)
auto help = std::string{"* Options given/available:\n"};
auto args = Arguments<T>{};
args.platform_id = GetArgument(argc, argv, help, kArgPlatform, size_t{0});
args.device_id = GetArgument(argc, argv, help, kArgDevice, size_t{0});
args.precision = GetArgument(argc, argv, help, kArgPrecision, Precision::kSingle);
args.m = GetArgument(argc, argv, help, kArgM, size_t{1024});
args.n = GetArgument(argc, argv, help, kArgN, size_t{1024});
args.fraction = GetArgument(argc, argv, help, kArgFraction, 2048.0);
fprintf(stdout, "%s\n", help.c_str());
// Creates input buffers with random data
auto a_mat = std::vector<T>(args.m * args.n);
auto b_mat = std::vector<T>(args.m * args.n);
PopulateVector(a_mat);
PopulateVector(b_mat);
// Initializes the tuner for the chosen device
cltune::Tuner tuner(args.platform_id, args.device_id);
// Use full-search to explore all parameter combinations.
tuner.UseFullSearch();
// Configures the tuning parameters (kernel specific)
tune_function(args, a_mat, b_mat, tuner);
// Starts the tuning process
tuner.Tune();
// Prints the results to screen
auto time_ms = tuner.PrintToScreen();
tuner.PrintFormatted();
// Also prints the performance of the best-case in terms of GB/s
const auto mega_bytes = (2*args.m*args.n*GetBytes(args.precision)) * 1.0e-6;
if (time_ms != 0.0) {
printf("[ -------> ] %.1lf ms or %.1lf GB/s\n", time_ms, mega_bytes/time_ms);
}
}
// Compiles the above function
template void TunerAB<float>(int, char**, const Tuner2<float>&);
template void TunerAB<double>(int, char**, const Tuner2<double>&);
template void TunerAB<float2>(int, char**, const Tuner2<float2>&);
template void TunerAB<double2>(int, char**, const Tuner2<double2>&);
// =================================================================================================
// Function to get command-line argument, set-up the input buffers, configure the tuner, and collect
// the results. Used for matrix-matrix-matrix routines.
template <typename T>
void TunerABC(int argc, char* argv[], const Tuner3<T> &tune_function) {
// Sets the parameters and platform/device for which to tune (command-line options)
auto help = std::string{"* Options given/available:\n"};
auto args = Arguments<T>{};
args.platform_id = GetArgument(argc, argv, help, kArgPlatform, size_t{0});
args.device_id = GetArgument(argc, argv, help, kArgDevice, size_t{0});
args.precision = GetArgument(argc, argv, help, kArgPrecision, Precision::kSingle);
args.m = GetArgument(argc, argv, help, kArgM, size_t{1024});
args.n = GetArgument(argc, argv, help, kArgN, size_t{1024});
args.k = GetArgument(argc, argv, help, kArgK, size_t{1024});
args.alpha = GetArgument(argc, argv, help, kArgAlpha, GetScalar<T>());
args.beta = GetArgument(argc, argv, help, kArgBeta, GetScalar<T>());
args.fraction = GetArgument(argc, argv, help, kArgFraction, 2048.0);
fprintf(stdout, "%s\n", help.c_str());
// Creates input buffers with random data
auto a_mat = std::vector<T>(args.m * args.k);
auto b_mat = std::vector<T>(args.n * args.k);
auto c_mat = std::vector<T>(args.m * args.n);
PopulateVector(a_mat);
PopulateVector(b_mat);
PopulateVector(c_mat);
// Initializes the tuner for the chosen device
cltune::Tuner tuner(args.platform_id, args.device_id);
// Use random-search to search only a part of the parameter values. The fraction of the search-
// space to explore is set as a command-line argument.
tuner.UseRandomSearch(1.0/args.fraction);
// Configures the tuning parameters (kernel specific)
tune_function(args, a_mat, b_mat, c_mat, tuner);
// Starts the tuning process
tuner.Tune();
// Prints the results to screen
auto time_ms = tuner.PrintToScreen();
tuner.PrintFormatted();
// Also prints the performance of the best-case in terms of GFLOPS
const auto mega_flops = (2*args.m*args.n*args.k) * 1.0e-6;
if (time_ms != 0.0) {
printf("[ -------> ] %.1lf ms or %.1lf GFLOPS\n", time_ms, mega_flops/time_ms);
}
}
// Compiles the above function
template void TunerABC<float>(int, char**, const Tuner3<float>&);
template void TunerABC<double>(int, char**, const Tuner3<double>&);
template void TunerABC<float2>(int, char**, const Tuner3<float2>&);
template void TunerABC<double2>(int, char**, const Tuner3<double2>&);
// =================================================================================================
} // namespace clblast


@@ -7,13 +7,12 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements an auto-tuner to tune the Xaxpy OpenCL kernel. It uses the CLTune library.
// This file uses the CLTune auto-tuner to tune the xaxpy OpenCL kernels.
//
// =================================================================================================
#include <string>
#include <vector>
#include <stdexcept>
#include "internal/utilities.h"
#include "internal/tuning.h"
@@ -21,66 +20,100 @@
namespace clblast {
// =================================================================================================
// The Xaxpy auto-tuner
// See comment at top of file for a description of the class
template <typename T>
void XaxpyTune(const Arguments<T> &args,
const std::vector<T> &x_vec, std::vector<T> &y_vec,
cltune::Tuner &tuner) {
class TuneXaxpy {
public:
// The XaxpyFast kernel only works under certain conditions. Check here whether the condition is
// true for the reference kernel
if (!IsMultiple(args.n, 64)) {
throw std::runtime_error("The 'XaxpyFast' kernel requires 'n' to be a multiple of WGS*WPT*VW");
// The representative kernel and the source code
static std::string KernelFamily() { return "xaxpy"; }
static std::string KernelName() { return "XaxpyFast"; }
static std::string GetSources() {
return
#include "../src/kernels/common.opencl"
#include "../src/kernels/xaxpy.opencl"
;
}
// This points to the XaxpyFast kernel as found in the CLBlast library
std::string sources =
#include "../src/kernels/common.opencl"
#include "../src/kernels/xaxpy.opencl"
;
auto id = tuner.AddKernelFromString(sources, "XaxpyFast", {args.n}, {1});
tuner.SetReferenceFromString(sources, "XaxpyFast", {args.n}, {64});
// The list of arguments relevant for this routine
static std::vector<std::string> GetOptions() { return {kArgN, kArgAlpha}; }
// Sets the tunable parameters and their possible values
tuner.AddParameter(id, "WGS", {64, 128, 256, 512, 1024, 2048});
tuner.AddParameter(id, "WPT", {1, 2, 4, 8});
tuner.AddParameter(id, "VW", {1, 2, 4, 8});
// Tests for a specific precision
tuner.AddParameter(id, "PRECISION", {static_cast<size_t>(args.precision)});
tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
// Modifies the thread-sizes (local) based on the parameters
tuner.MulLocalSize(id, {"WGS"});
tuner.DivGlobalSize(id, {"WPT"});
tuner.DivGlobalSize(id, {"VW"});
// Sets the function's arguments
tuner.AddArgumentScalar(static_cast<int>(args.n));
tuner.AddArgumentScalar(args.alpha);
tuner.AddArgumentInput(x_vec);
tuner.AddArgumentOutput(y_vec);
}
// =================================================================================================
// Main function which calls the common client code with the routine-specific function as argument.
void TunerXaxpy(int argc, char *argv[]) {
switch(GetPrecision(argc, argv)) {
case Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
case Precision::kSingle: TunerXY<float>(argc, argv, XaxpyTune<float>); break;
case Precision::kDouble: TunerXY<double>(argc, argv, XaxpyTune<double>); break;
case Precision::kComplexSingle: TunerXY<float2>(argc, argv, XaxpyTune<float2>); break;
case Precision::kComplexDouble: TunerXY<double2>(argc, argv, XaxpyTune<double2>); break;
// Tests for valid arguments
static void TestValidArguments(const Arguments<T> &args) {
if (!IsMultiple(args.n, 64)) {
throw std::runtime_error("'XaxpyFast' requires 'n' to be a multiple of WGS*WPT*VW");
}
}
}
// Sets the default values for the arguments
static size_t DefaultM() { return 1; } // N/A for this kernel
static size_t DefaultN() { return 4096*1024; }
static size_t DefaultK() { return 1; } // N/A for this kernel
static double DefaultFraction() { return 1.0; } // N/A for this kernel
// Describes how to obtain the sizes of the buffers
static size_t GetSizeX(const Arguments<T> &args) { return args.n; }
static size_t GetSizeY(const Arguments<T> &args) { return args.n; }
static size_t GetSizeA(const Arguments<T> &) { return 1; } // N/A for this kernel
static size_t GetSizeB(const Arguments<T> &) { return 1; } // N/A for this kernel
static size_t GetSizeC(const Arguments<T> &) { return 1; } // N/A for this kernel
// Sets the tuning parameters and their possible values
static void SetParameters(cltune::Tuner &tuner, const size_t id) {
tuner.AddParameter(id, "WGS", {64, 128, 256, 512, 1024, 2048});
tuner.AddParameter(id, "WPT", {1, 2, 4, 8});
tuner.AddParameter(id, "VW", {1, 2, 4, 8});
}
// Sets the constraints and local memory size
static void SetConstraints(cltune::Tuner &, const size_t) { }
static void SetLocalMemorySize(cltune::Tuner &, const size_t, const Arguments<T> &) { }
// Sets the base thread configuration
static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.n}; }
static std::vector<size_t> LocalSize() { return {1}; }
static std::vector<size_t> LocalSizeRef() { return {64}; }
// Transforms the thread configuration based on the parameters
using TransformVector = std::vector<std::vector<std::string>>;
static TransformVector MulLocal() { return {{"WGS"}}; }
static TransformVector DivLocal() { return {}; }
static TransformVector MulGlobal() { return {}; }
static TransformVector DivGlobal() { return {{"WPT"},{"VW"}}; }
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &x_vec, std::vector<T> &y_vec,
std::vector<T> &, std::vector<T> &, std::vector<T> &) {
tuner.AddArgumentScalar(static_cast<int>(args.n));
tuner.AddArgumentScalar(args.alpha);
tuner.AddArgumentInput(x_vec);
tuner.AddArgumentOutput(y_vec);
}
// Describes how to compute the performance metrics
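// (the factor 3 below counts one read of x, one read of y, and one write of y per element)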
static size_t GetMetric(const Arguments<T> &args) {
return 3 * args.n * GetBytes(args.precision);
}
static std::string PerformanceUnit() { return "GB/s"; }
};
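// (This class is used purely as a compile-time policy: the shared Tuner<C,T> entry point from
//  internal/tuning.h obtains all kernel-specific behaviour through the static members above.)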
// =================================================================================================
} // namespace clblast
// Shortcuts to the clblast namespace
using float2 = clblast::float2;
using double2 = clblast::double2;
// Main function (not within the clblast namespace)
int main(int argc, char *argv[]) {
clblast::TunerXaxpy(argc, argv);
switch(clblast::GetPrecision(argc, argv)) {
case clblast::Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
case clblast::Precision::kSingle: clblast::Tuner<clblast::TuneXaxpy<float>, float>(argc, argv); break;
case clblast::Precision::kDouble: clblast::Tuner<clblast::TuneXaxpy<double>, double>(argc, argv); break;
case clblast::Precision::kComplexSingle: clblast::Tuner<clblast::TuneXaxpy<float2>, float2>(argc, argv); break;
case clblast::Precision::kComplexDouble: clblast::Tuner<clblast::TuneXaxpy<double2>, double2>(argc, argv); break;
}
return 0;
}

View file

@ -7,15 +7,12 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements an auto-tuner to tune the Xgemm OpenCL kernel. It uses the CLTune library.
// Note that this tuner uses random-search: running it multiple times or with a larger fraction
// argument might be necessary to obtain good results.
// This file uses the CLTune auto-tuner to tune the xgemm OpenCL kernels.
//
// =================================================================================================
#include <string>
#include <vector>
#include <stdexcept>
#include "internal/utilities.h"
#include "internal/tuning.h"
@ -23,102 +20,136 @@
namespace clblast {
// =================================================================================================
// The Xgemm auto-tuner
// See comment at top of file for a description of the class
template <typename T>
void XgemmTune(const Arguments<T> &args,
const std::vector<T> &a_mat, const std::vector<T> &b_mat, std::vector<T> &c_mat,
cltune::Tuner &tuner) {
class TuneXgemm {
public:
// This points to the Xgemm kernel as found in the CLBlast library and its golden reference
std::string sources =
#include "../src/kernels/common.opencl"
#include "../src/kernels/xgemm.opencl"
;
auto id = tuner.AddKernelFromString(sources, "Xgemm", {args.m, args.n}, {1, 1});
tuner.SetReferenceFromString(sources, "Xgemm", {args.m, args.n}, {8, 8});
// Sets the tunable parameters and their possible values
tuner.AddParameter(id, "MWG", {16, 32, 64, 128});
tuner.AddParameter(id, "NWG", {16, 32, 64, 128});
tuner.AddParameter(id, "KWG", {16, 32});
tuner.AddParameter(id, "MDIMC", {8, 16, 32});
tuner.AddParameter(id, "NDIMC", {8, 16, 32});
tuner.AddParameter(id, "MDIMA", {8, 16, 32});
tuner.AddParameter(id, "NDIMB", {8, 16, 32});
tuner.AddParameter(id, "KWI", {2, 8});
tuner.AddParameter(id, "VWM", {1, 2, 4, 8});
tuner.AddParameter(id, "VWN", {1, 2, 4, 8});
tuner.AddParameter(id, "STRM", {0, 1});
tuner.AddParameter(id, "STRN", {0, 1});
tuner.AddParameter(id, "SA", {0, 1});
tuner.AddParameter(id, "SB", {0, 1});
// Tests for a specific precision
tuner.AddParameter(id, "PRECISION", {static_cast<size_t>(args.precision)});
tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
// Sets the helper functions to implement the constraints below
auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
auto MultipleOfXMulY = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]*v[2]); };
auto MultipleOfXMulYDivZ = [] (std::vector<size_t> v) { return IsMultiple(v[0], (v[1]*v[2])/v[3]); };
// Sets constraints: Requirement for unrolling the KWG loop
tuner.AddConstraint(id, MultipleOfX, {"KWG", "KWI"});
// Sets constraints: Required for integer MWI and NWI
tuner.AddConstraint(id, MultipleOfXMulY, {"MWG", "MDIMC", "VWM"});
tuner.AddConstraint(id, MultipleOfXMulY, {"NWG", "NDIMC", "VWN"});
// Sets constraints: Required for integer MWIA and NWIB
tuner.AddConstraint(id, MultipleOfXMulY, {"MWG", "MDIMA", "VWM"});
tuner.AddConstraint(id, MultipleOfXMulY, {"NWG", "NDIMB", "VWN"});
// Sets constraints: KWG has to be a multiple of KDIMA = ((MDIMC*NDIMC)/(MDIMA)) and KDIMB = (...)
tuner.AddConstraint(id, MultipleOfXMulYDivZ, {"KWG", "MDIMC", "NDIMC", "MDIMA"});
tuner.AddConstraint(id, MultipleOfXMulYDivZ, {"KWG", "MDIMC", "NDIMC", "NDIMB"});
// Sets the constraints for local memory size limitations
auto LocalMemorySize = [args] (std::vector<size_t> v) {
return (((v[0]*v[1]*v[2]/v[3]) + (v[4]*v[5]*v[6]/v[7]))*GetBytes(args.precision));
};
tuner.SetLocalMemoryUsage(id, LocalMemorySize, {"SA", "KWG", "MWG", "VWM",
"SB", "KWG", "NWG", "VWN"});
// Modifies the thread-sizes (both global and local) based on the parameters
tuner.MulLocalSize(id, {"MDIMC", "NDIMC"});
tuner.MulGlobalSize(id, {"MDIMC", "NDIMC"});
tuner.DivGlobalSize(id, {"MWG", "NWG"});
// Sets the function's arguments
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentScalar(static_cast<int>(args.n));
tuner.AddArgumentScalar(static_cast<int>(args.k));
tuner.AddArgumentScalar(args.alpha);
tuner.AddArgumentScalar(args.beta);
tuner.AddArgumentInput(a_mat);
tuner.AddArgumentInput(b_mat);
tuner.AddArgumentOutput(c_mat);
}
// =================================================================================================
// Main function which calls the common client code with the routine-specific function as argument.
void TunerXgemm(int argc, char *argv[]) {
switch(GetPrecision(argc, argv)) {
case Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
case Precision::kSingle: TunerABC<float>(argc, argv, XgemmTune<float>); break;
case Precision::kDouble: TunerABC<double>(argc, argv, XgemmTune<double>); break;
case Precision::kComplexSingle: TunerABC<float2>(argc, argv, XgemmTune<float2>); break;
case Precision::kComplexDouble: TunerABC<double2>(argc, argv, XgemmTune<double2>); break;
// The representative kernel and the source code
static std::string KernelFamily() { return "xgemm"; }
static std::string KernelName() { return "Xgemm"; }
static std::string GetSources() {
return
#include "../src/kernels/common.opencl"
#include "../src/kernels/xgemm.opencl"
;
}
}
// The list of arguments relevant for this routine
static std::vector<std::string> GetOptions() {
return {kArgM, kArgN, kArgK, kArgAlpha, kArgBeta, kArgFraction};
}
// Tests for valid arguments
static void TestValidArguments(const Arguments<T> &) { }
// Sets the default values for the arguments
static size_t DefaultM() { return 1024; }
static size_t DefaultN() { return 1024; }
static size_t DefaultK() { return 1024; }
static double DefaultFraction() { return 2048.0; }
// Describes how to obtain the sizes of the buffers
static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
static size_t GetSizeY(const Arguments<T> &) { return 1; } // N/A for this kernel
static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.k; }
static size_t GetSizeB(const Arguments<T> &args) { return args.n * args.k; }
static size_t GetSizeC(const Arguments<T> &args) { return args.m * args.n; }
// Sets the tuning parameters and their possible values
static void SetParameters(cltune::Tuner &tuner, const size_t id) {
tuner.AddParameter(id, "MWG", {16, 32, 64, 128});
tuner.AddParameter(id, "NWG", {16, 32, 64, 128});
tuner.AddParameter(id, "KWG", {16, 32});
tuner.AddParameter(id, "MDIMC", {8, 16, 32});
tuner.AddParameter(id, "NDIMC", {8, 16, 32});
tuner.AddParameter(id, "MDIMA", {8, 16, 32});
tuner.AddParameter(id, "NDIMB", {8, 16, 32});
tuner.AddParameter(id, "KWI", {2, 8});
tuner.AddParameter(id, "VWM", {1, 2, 4, 8});
tuner.AddParameter(id, "VWN", {1, 2, 4, 8});
tuner.AddParameter(id, "STRM", {0, 1});
tuner.AddParameter(id, "STRN", {0, 1});
tuner.AddParameter(id, "SA", {0, 1});
tuner.AddParameter(id, "SB", {0, 1});
}
// Sets the constraints
static void SetConstraints(cltune::Tuner &tuner, const size_t id) {
auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
auto MultipleOfXMulY = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]*v[2]); };
auto MultipleOfXMulYDivZ = [] (std::vector<size_t> v) { return IsMultiple(v[0], (v[1]*v[2])/v[3]); };
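// (Each lambda receives the current values of the parameters named in the corresponding
//  AddConstraint call, in that order; configurations violating a constraint are skipped.)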
// Requirement for unrolling the KWG loop
tuner.AddConstraint(id, MultipleOfX, {"KWG", "KWI"});
// Required for integer MWI and NWI
tuner.AddConstraint(id, MultipleOfXMulY, {"MWG", "MDIMC", "VWM"});
tuner.AddConstraint(id, MultipleOfXMulY, {"NWG", "NDIMC", "VWN"});
// Required for integer MWIA and NWIB
tuner.AddConstraint(id, MultipleOfXMulY, {"MWG", "MDIMA", "VWM"});
tuner.AddConstraint(id, MultipleOfXMulY, {"NWG", "NDIMB", "VWN"});
// KWG has to be a multiple of KDIMA = ((MDIMC*NDIMC)/(MDIMA)) and KDIMB = (...)
tuner.AddConstraint(id, MultipleOfXMulYDivZ, {"KWG", "MDIMC", "NDIMC", "MDIMA"});
tuner.AddConstraint(id, MultipleOfXMulYDivZ, {"KWG", "MDIMC", "NDIMC", "NDIMB"});
}
// Sets the local memory size
static void SetLocalMemorySize(cltune::Tuner &tuner, const size_t id, const Arguments<T> &args) {
auto LocalMemorySize = [args] (std::vector<size_t> v) {
return (((v[0]*v[1]*v[2]/v[3]) + (v[4]*v[5]*v[6]/v[7]))*GetBytes(args.precision));
};
tuner.SetLocalMemoryUsage(id, LocalMemorySize, {"SA", "KWG", "MWG", "VWM",
"SB", "KWG", "NWG", "VWN"});
}
// Sets the base thread configuration
static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m, args.n}; }
static std::vector<size_t> LocalSize() { return {1, 1}; }
static std::vector<size_t> LocalSizeRef() { return {8, 8}; }
// Transforms the thread configuration based on the parameters
using TransformVector = std::vector<std::vector<std::string>>;
static TransformVector MulLocal() { return {{"MDIMC", "NDIMC"}}; }
static TransformVector DivLocal() { return {}; }
static TransformVector MulGlobal() { return {{"MDIMC", "NDIMC"}}; }
static TransformVector DivGlobal() { return {{"MWG", "NWG"}}; }
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &, std::vector<T> &,
std::vector<T> &a_mat, std::vector<T> &b_mat, std::vector<T> &c_mat) {
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentScalar(static_cast<int>(args.n));
tuner.AddArgumentScalar(static_cast<int>(args.k));
tuner.AddArgumentScalar(args.alpha);
tuner.AddArgumentScalar(args.beta);
tuner.AddArgumentInput(a_mat);
tuner.AddArgumentInput(b_mat);
tuner.AddArgumentOutput(c_mat);
}
// Describes how to compute the performance metrics
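// (the matrix product performs m*n*k multiply-adds, i.e. 2*m*n*k floating-point operations)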
static size_t GetMetric(const Arguments<T> &args) {
return 2 * args.m * args.n * args.k;
}
static std::string PerformanceUnit() { return "GFLOPS"; }
};
// =================================================================================================
} // namespace clblast
// Shortcuts to the clblast namespace
using float2 = clblast::float2;
using double2 = clblast::double2;
// Main function (not within the clblast namespace)
int main(int argc, char *argv[]) {
clblast::TunerXgemm(argc, argv);
switch(clblast::GetPrecision(argc, argv)) {
case clblast::Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
case clblast::Precision::kSingle: clblast::Tuner<clblast::TuneXgemm<float>, float>(argc, argv); break;
case clblast::Precision::kDouble: clblast::Tuner<clblast::TuneXgemm<double>, double>(argc, argv); break;
case clblast::Precision::kComplexSingle: clblast::Tuner<clblast::TuneXgemm<float2>, float2>(argc, argv); break;
case clblast::Precision::kComplexDouble: clblast::Tuner<clblast::TuneXgemm<double2>, double2>(argc, argv); break;
}
return 0;
}
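
As an aside, the constraint helpers above reduce to plain divisibility tests. A minimal self-contained sketch, assuming IsMultiple is the usual modulo check (its definition is not shown in this diff):

#include <cstddef>
#include <cstdio>
#include <vector>

// Assumed behaviour of the IsMultiple helper used by the constraints above
bool IsMultiple(const size_t a, const size_t b) { return (a % b) == 0; }

int main() {
  // MWG=64, MDIMC=16, VWM=4 satisfies the "integer MWI" constraint, since 64 is a multiple of
  // 16*4; MWG=32 with the same MDIMC and VWM would be rejected and never compiled or run.
  auto MultipleOfXMulY = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]*v[2]); };
  printf("%d\n", MultipleOfXMulY({64, 16, 4}) ? 1 : 0);  // prints 1
  printf("%d\n", MultipleOfXMulY({32, 16, 4}) ? 1 : 0);  // prints 0
  return 0;
}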

View file

@ -7,8 +7,7 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements an auto-tuner to tune the Xgemv OpenCL kernel. It uses the CLTune library.
// Three variations of the kernel are tuned:
// This file uses the CLTune auto-tuner to tune the xgemv OpenCL kernels. Three variants are tuned:
// 1: The full version of the kernel
// 2: The fast version for non-transposed matrices
// 3: The fast version for transposed matrices
@ -17,7 +16,6 @@
#include <string>
#include <vector>
#include <stdexcept>
#include "internal/utilities.h"
#include "internal/tuning.h"
@ -25,93 +23,121 @@
namespace clblast {
// =================================================================================================
// The Xgemv auto-tuner
template <typename T>
void XgemvTune(const Arguments<T> &args, const size_t variation,
const std::vector<T> &a_mat, const std::vector<T> &x_vec, std::vector<T> &y_vec,
cltune::Tuner &tuner) {
// See comment at top of file for a description of the class
template <typename T, int V>
class TuneXgemv {
public:
// Sets the kernel name and the layout argument
auto kernel_name = (variation == 1) ? "Xgemv" : ((variation == 2) ? "XgemvFast" : "XgemvFastRot");
auto a_rotated = (variation == 3) ? 1 : 0;
// This points to the Xgemv kernel as found in the CLBlast library
std::string sources =
#include "../src/kernels/common.opencl"
#include "../src/kernels/xgemv.opencl"
;
auto id = tuner.AddKernelFromString(sources, kernel_name, {args.m}, {1});
tuner.SetReferenceFromString(sources, "Xgemv", {args.m}, {64});
// Helper for the constraints
auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
// Sets the tunable parameters, their possible values, the adjusted thread sizes, and constraints
if (variation == 1) {
tuner.AddParameter(id, "WGS1", {64, 128, 256, 512, 1024, 1536, 2048});
tuner.AddParameter(id, "WPT1", {1, 2, 4, 8});
tuner.MulLocalSize(id, {"WGS1"});
tuner.DivGlobalSize(id, {"WPT1"});
}
else if (variation == 2) {
tuner.AddParameter(id, "WGS2", {64, 128, 256, 512, 1024, 1536, 2048});
tuner.AddParameter(id, "WPT2", {1, 2, 4, 8});
tuner.AddParameter(id, "VW2", {1, 2, 4, 8});
tuner.MulLocalSize(id, {"WGS2"});
tuner.DivGlobalSize(id, {"WPT2"});
tuner.AddConstraint(id, MultipleOfX, {"WPT2", "VW2"});
}
else if (variation == 3) {
tuner.AddParameter(id, "WGS3", {64, 128, 256, 512, 1024, 1536, 2048});
tuner.AddParameter(id, "WPT3", {1, 2, 4, 8});
tuner.AddParameter(id, "VW3", {1, 2, 4, 8});
tuner.MulLocalSize(id, {"WGS3"});
tuner.DivGlobalSize(id, {"WPT3"});
tuner.AddConstraint(id, MultipleOfX, {"WGS3", "VW3"});
// The representative kernel and the source code
static std::string KernelFamily() { return "xgemv_"+std::to_string(V); }
static std::string KernelName() { return (V==1) ? "Xgemv" : ((V==2) ? "XgemvFast" : "XgemvFastRot"); }
static std::string GetSources() {
return
#include "../src/kernels/common.opencl"
#include "../src/kernels/xgemv.opencl"
;
}
// Tests for a specific precision
tuner.AddParameter(id, "PRECISION", {static_cast<size_t>(args.precision)});
tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
// The list of arguments relevant for this routine
static std::vector<std::string> GetOptions() { return {kArgM, kArgN, kArgAlpha, kArgBeta}; }
// Sets the function's arguments
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentScalar(static_cast<int>(args.n));
tuner.AddArgumentScalar(args.alpha);
tuner.AddArgumentScalar(args.beta);
tuner.AddArgumentScalar(static_cast<int>(a_rotated));
tuner.AddArgumentInput(a_mat);
tuner.AddArgumentScalar(0);
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentInput(x_vec);
tuner.AddArgumentScalar(0);
tuner.AddArgumentScalar(1);
tuner.AddArgumentOutput(y_vec);
tuner.AddArgumentScalar(0);
tuner.AddArgumentScalar(1);
tuner.AddArgumentScalar(0); // Conjugate transpose
}
// Tests for valid arguments
static void TestValidArguments(const Arguments<T> &) { }
// =================================================================================================
// Sets the default values for the arguments
static size_t DefaultM() { return 2048; }
static size_t DefaultN() { return 2048; }
static size_t DefaultK() { return 1; } // N/A for this kernel
static double DefaultFraction() { return 1.0; } // N/A for this kernel
// Main function which calls the common client code with the routine-specific function as argument.
void TunerXgemv(int argc, char *argv[]) {
auto num_variations = size_t{3};
switch(GetPrecision(argc, argv)) {
case Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
case Precision::kSingle: TunerAXY<float>(argc, argv, num_variations, XgemvTune<float>); break;
case Precision::kDouble: TunerAXY<double>(argc, argv, num_variations, XgemvTune<double>); break;
case Precision::kComplexSingle: TunerAXY<float2>(argc, argv, num_variations, XgemvTune<float2>); break;
case Precision::kComplexDouble: TunerAXY<double2>(argc, argv, num_variations, XgemvTune<double2>); break;
// Describes how to obtain the sizes of the buffers
static size_t GetSizeX(const Arguments<T> &args) { return args.n; }
static size_t GetSizeY(const Arguments<T> &args) { return args.m; }
static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.n; }
static size_t GetSizeB(const Arguments<T> &) { return 1; } // N/A for this kernel
static size_t GetSizeC(const Arguments<T> &) { return 1; } // N/A for this kernel
// Sets the tuning parameters and their possible values
static void SetParameters(cltune::Tuner &tuner, const size_t id) {
tuner.AddParameter(id, "WGS"+std::to_string(V), {64, 128, 256, 512, 1024, 1536, 2048});
tuner.AddParameter(id, "WPT"+std::to_string(V), {1, 2, 4, 8});
if (V==2 || V==3) { tuner.AddParameter(id, "VW"+std::to_string(V), {1, 2, 4, 8}); }
}
}
// Sets the constraints and local memory size
static void SetConstraints(cltune::Tuner &tuner, const size_t id) {
auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
if (V==2 || V==3) {
tuner.AddConstraint(id, MultipleOfX, {"WPT"+std::to_string(V), "VW"+std::to_string(V)});
}
}
static void SetLocalMemorySize(cltune::Tuner &, const size_t, const Arguments<T> &) { }
// Sets the base thread configuration
static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m}; }
static std::vector<size_t> LocalSize() { return {1}; }
static std::vector<size_t> LocalSizeRef() { return {64}; }
// Transforms the thread configuration based on the parameters
using TransformVector = std::vector<std::vector<std::string>>;
static TransformVector MulLocal() { return {{"WGS"+std::to_string(V)}}; }
static TransformVector DivLocal() { return {}; }
static TransformVector MulGlobal() { return {}; }
static TransformVector DivGlobal() { return {{"WPT"+std::to_string(V)}}; }
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &x_vec, std::vector<T> &y_vec,
std::vector<T> &a_mat, std::vector<T> &, std::vector<T> &) {
auto a_rotated = (V==3) ? 1 : 0;
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentScalar(static_cast<int>(args.n));
tuner.AddArgumentScalar(args.alpha);
tuner.AddArgumentScalar(args.beta);
tuner.AddArgumentScalar(static_cast<int>(a_rotated));
tuner.AddArgumentInput(a_mat);
tuner.AddArgumentScalar(0);
tuner.AddArgumentScalar(static_cast<int>(args.m));
tuner.AddArgumentInput(x_vec);
tuner.AddArgumentScalar(0);
tuner.AddArgumentScalar(1);
tuner.AddArgumentOutput(y_vec);
tuner.AddArgumentScalar(0);
tuner.AddArgumentScalar(1);
tuner.AddArgumentScalar(0); // Conjugate transpose
}
// Describes how to compute the performance metrics
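// (counts one read of the m*n matrix A, one read of the n-sized vector x, and a read plus a
//  write of the m-sized vector y)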
static size_t GetMetric(const Arguments<T> &args) {
return (args.m*args.n + 2*args.m + args.n) * GetBytes(args.precision);
}
static std::string PerformanceUnit() { return "GB/s"; }
};
// =================================================================================================
} // namespace clblast
// Shortcuts to the clblast namespace
using float2 = clblast::float2;
using double2 = clblast::double2;
// Function to tune a specific variation V (not within the clblast namespace)
template <int V>
void StartVariation(int argc, char *argv[]) {
switch(clblast::GetPrecision(argc, argv)) {
case clblast::Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
case clblast::Precision::kSingle: clblast::Tuner<clblast::TuneXgemv<float,V>, float>(argc, argv); break;
case clblast::Precision::kDouble: clblast::Tuner<clblast::TuneXgemv<double,V>, double>(argc, argv); break;
case clblast::Precision::kComplexSingle: clblast::Tuner<clblast::TuneXgemv<float2,V>, float2>(argc, argv); break;
case clblast::Precision::kComplexDouble: clblast::Tuner<clblast::TuneXgemv<double2,V>, double2>(argc, argv); break;
}
}
// Main function (not within the clblast namespace)
int main(int argc, char *argv[]) {
clblast::TunerXgemv(argc, argv);
StartVariation<1>(argc, argv);
StartVariation<2>(argc, argv);
StartVariation<3>(argc, argv);
return 0;
}

View file

@ -270,5 +270,19 @@ size_t GetBytes(const Precision precision) {
}
}
// =================================================================================================
// Returns false if this precision is not supported by the device
template <> bool PrecisionSupported<float>(const Device &) { return true; }
template <> bool PrecisionSupported<float2>(const Device &) { return true; }
template <> bool PrecisionSupported<double>(const Device &device) {
auto extensions = device.Capabilities();
return (extensions.find(kKhronosDoublePrecision) == std::string::npos) ? false : true;
}
template <> bool PrecisionSupported<double2>(const Device &device) {
auto extensions = device.Capabilities();
return (extensions.find(kKhronosDoublePrecision) == std::string::npos) ? false : true;
}
// =================================================================================================
} // namespace clblast
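
For context, the Capabilities() call used above presumably wraps a standard OpenCL device-extensions query, with kKhronosDoublePrecision naming the "cl_khr_fp64" extension. A minimal stand-alone sketch of such a query in plain OpenCL (illustrative, not part of CLBlast):

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>
#include <CL/cl.h>

// Returns the extensions string of the first device on the first platform (error checks omitted)
std::string GetExtensions() {
  cl_platform_id platform;
  clGetPlatformIDs(1, &platform, nullptr);
  cl_device_id device;
  clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, 1, &device, nullptr);
  auto bytes = size_t{0};
  clGetDeviceInfo(device, CL_DEVICE_EXTENSIONS, 0, nullptr, &bytes);
  auto buffer = std::vector<char>(bytes);
  clGetDeviceInfo(device, CL_DEVICE_EXTENSIONS, bytes, buffer.data(), nullptr);
  return std::string{buffer.data()};
}

int main() {
  const auto extensions = GetExtensions();
  const auto supports_fp64 = extensions.find("cl_khr_fp64") != std::string::npos;
  printf("FP64 support: %s\n", supports_fp64 ? "yes" : "no");
  return 0;
}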

View file

@ -335,20 +335,6 @@ template <> const std::vector<double2> GetExampleScalars(const bool full_test) {
// =================================================================================================
// Returns false if this precision is not supported by the device
template <> bool PrecisionSupported<float>(const Device &) { return true; }
template <> bool PrecisionSupported<float2>(const Device &) { return true; }
template <> bool PrecisionSupported<double>(const Device &device) {
auto extensions = device.Capabilities();
return (extensions.find(kKhronosDoublePrecision) == std::string::npos) ? false : true;
}
template <> bool PrecisionSupported<double2>(const Device &device) {
auto extensions = device.Capabilities();
return (extensions.find(kKhronosDoublePrecision) == std::string::npos) ? false : true;
}
// =================================================================================================
// Compiles the templated class
template class Tester<float, float>;
template class Tester<double, double>;

View file

@ -140,10 +140,6 @@ bool TestSimilarity(const T val1, const T val2);
template <typename T>
const std::vector<T> GetExampleScalars(const bool full_test);
// Returns false if this precision is not supported by the device
template <typename T>
bool PrecisionSupported(const Device &device);
// =================================================================================================
} // namespace clblast