Added support for the SASUM/DASUM/ScASUM/DzASUM routines

This commit is contained in:
cnugteren 2016-04-14 19:58:26 -06:00
parent e0497807e2
commit 8be99de82d
14 changed files with 455 additions and 19 deletions

View file

@@ -121,7 +121,7 @@ include_directories(${clblast_SOURCE_DIR}/include ${OPENCL_INCLUDE_DIRS})
set(KERNELS copy pad transpose padtranspose xaxpy xdot xger xgemm xgemv)
set(SAMPLE_PROGRAMS_CPP sgemm)
set(SAMPLE_PROGRAMS_C sgemm)
set(LEVEL1_ROUTINES xswap xscal xcopy xaxpy xdot xdotu xdotc xnrm2)
set(LEVEL1_ROUTINES xswap xscal xcopy xaxpy xdot xdotu xdotc xnrm2 xasum)
set(LEVEL2_ROUTINES xgemv xgbmv xhemv xhbmv xhpmv xsymv xsbmv xspmv xtrmv xtbmv xtpmv
xger xgeru xgerc xher xhpr xher2 xhpr2 xsyr xspr xsyr2 xspr2)
set(LEVEL3_ROUTINES xgemm xsymm xhemm xsyrk xherk xsyr2k xher2k xtrmm)

View file

@@ -185,7 +185,7 @@ CLBlast is in active development but already supports almost all the BLAS routin
| xDOTU | - | - | ✔ | ✔ | |
| xDOTC | - | - | ✔ | ✔ | |
| xNRM2 | ✔ | ✔ | ✔ | ✔ | |
| xASUM | | | | | |
| xASUM | ✔ | ✔ | ✔ | ✔ | |
| IxAMAX | | | | | |
| Level-2 | S | D | C | Z | Notes |
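With this change the four ASUM variants become callable through the templated C++ interface added in clblast.cc further down. A minimal host-side sketch of calling it (illustrative only, not part of this commit; it assumes an already initialised OpenCL context and command queue, and omits error checking and buffer clean-up):

#include <clblast.h>  // also pulls in the OpenCL headers
#include <vector>

void ExampleSasum(cl_context context, cl_command_queue queue) {
  const auto n = size_t{1024};
  auto host_x = std::vector<float>(n, -1.0f);  // expected result: sum of |x_i| = 1024
  auto x_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, n*sizeof(float), nullptr, nullptr);
  auto asum_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, sizeof(float), nullptr, nullptr);
  clEnqueueWriteBuffer(queue, x_buffer, CL_TRUE, 0, n*sizeof(float), host_x.data(), 0, nullptr, nullptr);
  auto event = cl_event{};
  auto status = clblast::Asum<float>(n,
                                     asum_buffer, 0,  // result buffer and its offset
                                     x_buffer, 0, 1,  // input vector, offset, increment
                                     &queue, &event);
  auto result = 0.0f;
  if (status == clblast::StatusCode::kSuccess) {
    clWaitForEvents(1, &event);
    clEnqueueReadBuffer(queue, asum_buffer, CL_TRUE, 0, sizeof(float), &result, 0, nullptr, nullptr);
  }
}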

View file

@@ -0,0 +1,56 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements the Xasum routine. The precision is implemented using a template argument.
//
// =================================================================================================
#ifndef CLBLAST_ROUTINES_XASUM_H_
#define CLBLAST_ROUTINES_XASUM_H_
#include "internal/routine.h"
namespace clblast {
// =================================================================================================
// See comment at top of file for a description of the class
template <typename T>
class Xasum: public Routine<T> {
public:
// Members and methods from the base class
using Routine<T>::db_;
using Routine<T>::source_string_;
using Routine<T>::queue_;
using Routine<T>::event_;
using Routine<T>::context_;
using Routine<T>::GetProgramFromCache;
using Routine<T>::TestVectorX;
using Routine<T>::TestVectorDot;
using Routine<T>::RunKernel;
using Routine<T>::ErrorIn;
// Constructor
Xasum(Queue &queue, EventPointer event, const std::string &name = "ASUM");
// Templated-precision implementation of the routine
StatusCode DoAsum(const size_t n,
const Buffer<T> &asum_buffer, const size_t asum_offset,
const Buffer<T> &x_buffer, const size_t x_offset, const size_t x_inc);
private:
// Static variable to get the precision
const static Precision precision_;
};
// =================================================================================================
} // namespace clblast
// CLBLAST_ROUTINES_XASUM_H_
#endif

View file

@@ -65,6 +65,7 @@ constexpr auto kArgCOffset = "offc";
constexpr auto kArgAPOffset = "offap";
constexpr auto kArgDotOffset = "offdot";
constexpr auto kArgNrm2Offset = "offnrm2";
constexpr auto kArgAsumOffset = "offasum";
constexpr auto kArgAlpha = "alpha";
constexpr auto kArgBeta = "beta";
@@ -119,6 +120,7 @@ struct Arguments {
size_t ap_offset = 0;
size_t dot_offset = 0;
size_t nrm2_offset = 0;
size_t asum_offset = 0;
T alpha = T{1.0};
T beta = T{1.0};
size_t x_size = 1;

View file

@@ -67,7 +67,7 @@ routines = [
Routine(True, "1", "dotu", T, [C,Z], ["n"], [], ["x","y"], ["dot"], [], "n", "Dot product of two complex vectors"),
Routine(True, "1", "dotc", T, [C,Z], ["n"], [], ["x","y"], ["dot"], [], "n", "Dot product of two complex vectors, one conjugated"),
Routine(True, "1", "nrm2", T, [S,D,Sc,Dz],["n"], [], ["x"], ["nrm2"], [], "2*n", "Euclidian norm of a vector"),
Routine(False, "1", "asum", T, [S,D,Sc,Dz],["n"], [], ["x"], ["asum"], [], "n", "Absolute sum of values in a vector"),
Routine(True, "1", "asum", T, [S,D,Sc,Dz],["n"], [], ["x"], ["asum"], [], "n", "Absolute sum of values in a vector"),
],
[ # Level 2: matrix-vector
Routine(True, "2a", "gemv", T, [S,D,C,Z], ["m","n"], ["layout","a_transpose"], ["a","x"], ["y"], ["alpha","beta"], "", "General matrix-vector multiplication"),
@@ -289,7 +289,7 @@ files = [
path_clblast+"/test/wrapper_clblas.h",
path_clblast+"/test/wrapper_cblas.h",
]
header_lines = [84, 65, 93, 22, 22, 38]
header_lines = [84, 66, 93, 22, 22, 38]
footer_lines = [6, 3, 9, 2, 6, 6]
# Checks whether the command-line arguments are valid; exits otherwise

View file

@@ -27,6 +27,7 @@
#include "internal/routines/level1/xdotu.h"
#include "internal/routines/level1/xdotc.h"
#include "internal/routines/level1/xnrm2.h"
#include "internal/routines/level1/xasum.h"
// BLAS level-2 includes
#include "internal/routines/level2/xgemv.h"
@@ -398,11 +399,17 @@ template StatusCode PUBLIC_API Nrm2<double2>(const size_t,
// Absolute sum of values in a vector: SASUM/DASUM/ScASUM/DzASUM
template <typename T>
StatusCode Asum(const size_t,
cl_mem, const size_t,
const cl_mem, const size_t, const size_t,
cl_command_queue*, cl_event*) {
return StatusCode::kNotImplemented;
StatusCode Asum(const size_t n,
cl_mem asum_buffer, const size_t asum_offset,
const cl_mem x_buffer, const size_t x_offset, const size_t x_inc,
cl_command_queue* queue, cl_event* event) {
auto queue_cpp = Queue(*queue);
auto routine = Xasum<T>(queue_cpp, event);
auto status = routine.SetUp();
if (status != StatusCode::kSuccess) { return status; }
return routine.DoAsum(n,
Buffer<T>(asum_buffer), asum_offset,
Buffer<T>(x_buffer), x_offset, x_inc);
}
template StatusCode PUBLIC_API Asum<float>(const size_t,
cl_mem, const size_t,

View file

@@ -109,6 +109,13 @@ R"(
#define SetToOne(a) a = ONE
#endif
// The absolute value (component-wise)
#if PRECISION == 3232 || PRECISION == 6464
#define AbsoluteValue(value) value.x = fabs(value.x); value.y = fabs(value.y)
#else
#define AbsoluteValue(value) value = fabs(value)
#endif
// Adds two complex variables
#if PRECISION == 3232 || PRECISION == 6464
#define Add(c, a, b) c.x = a.x + b.x; c.y = a.y + b.y
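For the complex precisions the new macro takes the absolute value of the real and imaginary parts separately; the two components are only summed in the Xasum epilogue kernel further down, so each element ends up contributing |Re(x)| + |Im(x)|, as the BLAS ScASUM/DzASUM definition prescribes. A small host-side illustration of that per-element contribution (plain C++ sketch, not part of the kernel; the float2 struct merely stands in for the OpenCL vector type):

#include <cmath>

struct float2 { float x; float y; };  // stand-in for OpenCL's float2

// Real precisions: the contribution of one element is simply its absolute value
inline float AsumTermReal(float value) { return std::fabs(value); }

// Complex precisions: component-wise absolute value, summed afterwards
inline float AsumTermComplex(float2 value) { return std::fabs(value.x) + std::fabs(value.y); }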

View file

@@ -0,0 +1,108 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file contains the Xasum kernel. It implements an absolute sum computation using reduction
// kernels. Reduction is split in two parts. In the first (main) kernel the X vector is loaded,
// followed by a per-thread and a per-workgroup reduction. The second (epilogue) kernel
// is executed with a single workgroup only, computing the final result.
//
// =================================================================================================
// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string
// literal). Comment-out this line for syntax-highlighting when developing.
R"(
// Parameters set by the tuner or by the database. Here they are given a basic default value in case
// this kernel file is used outside of the CLBlast library.
#ifndef WGS1
#define WGS1 64 // The local work-group size of the main kernel
#endif
#ifndef WGS2
#define WGS2 64 // The local work-group size of the epilogue kernel
#endif
// =================================================================================================
// The main reduction kernel, performing the loading and the majority of the operation
__attribute__((reqd_work_group_size(WGS1, 1, 1)))
__kernel void Xasum(const int n,
const __global real* restrict xgm, const int x_offset, const int x_inc,
__global real* output) {
__local real lm[WGS1];
const int lid = get_local_id(0);
const int wgid = get_group_id(0);
const int num_groups = get_num_groups(0);
// Performs loading and the first steps of the reduction
real acc;
SetToZero(acc);
int id = wgid*WGS1 + lid;
while (id < n) {
real x = xgm[id*x_inc + x_offset];
AbsoluteValue(x);
Add(acc, acc, x);
id += WGS1*num_groups;
}
lm[lid] = acc;
barrier(CLK_LOCAL_MEM_FENCE);
// Performs reduction in local memory
#pragma unroll
for (int s=WGS1/2; s>0; s=s>>1) {
if (lid < s) {
Add(lm[lid], lm[lid], lm[lid + s]);
}
barrier(CLK_LOCAL_MEM_FENCE);
}
// Stores the per-workgroup result
if (lid == 0) {
output[wgid] = lm[0];
}
}
// =================================================================================================
// The epilogue reduction kernel, performing the final bit of the operation. This kernel has to
// be launched with a single workgroup only.
__attribute__((reqd_work_group_size(WGS2, 1, 1)))
__kernel void XasumEpilogue(const __global real* restrict input,
__global real* asum, const int asum_offset) {
__local real lm[WGS2];
const int lid = get_local_id(0);
// Performs the first step of the reduction while loading the data
Add(lm[lid], input[lid], input[lid + WGS2]);
barrier(CLK_LOCAL_MEM_FENCE);
// Performs reduction in local memory
#pragma unroll
for (int s=WGS2/2; s>0; s=s>>1) {
if (lid < s) {
Add(lm[lid], lm[lid], lm[lid + s]);
}
barrier(CLK_LOCAL_MEM_FENCE);
}
// Computes the absolute value and stores the final result
if (lid == 0) {
#if PRECISION == 3232 || PRECISION == 6464
asum[asum_offset].x = lm[0].x + lm[0].y; // the result is a non-complex number
#else
asum[asum_offset] = lm[0];
#endif
}
}
// =================================================================================================
// End of the C++11 raw string literal
)"
// =================================================================================================
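The two kernels are tied together by the launch configuration in xasum.cc below: the main kernel runs with 2*WGS2 work-groups of WGS1 threads and writes one partial sum per work-group, and the epilogue's single work-group of WGS2 threads first folds those 2*WGS2 partial sums pairwise (the Add of input[lid] and input[lid + WGS2]) before reducing in local memory. A small worked check with the default parameters shown above (a sketch, not part of the commit):

#include <cstddef>

constexpr size_t WGS1 = 64;                   // default main-kernel work-group size
constexpr size_t WGS2 = 64;                   // default epilogue work-group size
constexpr size_t temp_size = 2 * WGS2;        // 128 partial sums, as set up in xasum.cc
constexpr size_t global1 = WGS1 * temp_size;  // 8192 threads -> 8192 / WGS1 = 128 work-groups
static_assert(global1 / WGS1 == temp_size, "one partial sum per work-group");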

View file

@@ -7,9 +7,9 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file contains the Xnrm2 kernel. It implements a dot-product computation using reduction
// kernels. Reduction is split in two parts. In the first (main) kernel the X and Y vectors are
// multiplied, followed by a per-thread and a per-workgroup reduction. The second (epilogue) kernel
// This file contains the Xnrm2 kernel. It implements a squared norm computation using reduction
// kernels. Reduction is split in two parts. In the first (main) kernel the X vector is squared,
// followed by a per-thread and a per-workgroup reduction. The second (epilogue) kernel
// is executed with a single workgroup only, computing the final result.
//
// =================================================================================================
@@ -29,7 +29,7 @@ R"(
// =================================================================================================
// The main reduction kernel, performing the multiplication and the majority of the sum operation
// The main reduction kernel, performing the multiplication and the majority of the operation
__attribute__((reqd_work_group_size(WGS1, 1, 1)))
__kernel void Xnrm2(const int n,
const __global real* restrict xgm, const int x_offset, const int x_inc,
@@ -70,7 +70,7 @@ __kernel void Xnrm2(const int n,
// =================================================================================================
// The epilogue reduction kernel, performing the final bit of the sum operation. This kernel has to
// The epilogue reduction kernel, performing the final bit of the operation. This kernel has to
// be launched with a single workgroup only.
__attribute__((reqd_work_group_size(WGS2, 1, 1)))
__kernel void Xnrm2Epilogue(const __global real* restrict input,

View file

@@ -0,0 +1,109 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements the Xasum class (see the header for information about the class).
//
// =================================================================================================
#include "internal/routines/level1/xasum.h"
#include <string>
#include <vector>
namespace clblast {
// =================================================================================================
// Specific implementations to get the precision based on a template argument
template <> const Precision Xasum<float>::precision_ = Precision::kSingle;
template <> const Precision Xasum<double>::precision_ = Precision::kDouble;
template <> const Precision Xasum<float2>::precision_ = Precision::kComplexSingle;
template <> const Precision Xasum<double2>::precision_ = Precision::kComplexDouble;
// =================================================================================================
// Constructor: forwards to base class constructor
template <typename T>
Xasum<T>::Xasum(Queue &queue, EventPointer event, const std::string &name):
Routine<T>(queue, event, name, {"Xdot"}, precision_) {
source_string_ =
#include "../../kernels/level1/xasum.opencl"
;
}
// =================================================================================================
// The main routine
template <typename T>
StatusCode Xasum<T>::DoAsum(const size_t n,
const Buffer<T> &asum_buffer, const size_t asum_offset,
const Buffer<T> &x_buffer, const size_t x_offset, const size_t x_inc) {
// Makes sure all dimensions are larger than zero
if (n == 0) { return StatusCode::kInvalidDimension; }
// Tests the vectors for validity
auto status = TestVectorX(n, x_buffer, x_offset, x_inc, sizeof(T));
if (ErrorIn(status)) { return status; }
status = TestVectorDot(1, asum_buffer, asum_offset, sizeof(T));
if (ErrorIn(status)) { return status; }
// Retrieves the Xasum kernels from the compiled binary
try {
auto& program = GetProgramFromCache();
auto kernel1 = Kernel(program, "Xasum");
auto kernel2 = Kernel(program, "XasumEpilogue");
// Creates the buffer for intermediate values
auto temp_size = 2*db_["WGS2"];
auto temp_buffer = Buffer<T>(context_, temp_size);
// Sets the kernel arguments
kernel1.SetArgument(0, static_cast<int>(n));
kernel1.SetArgument(1, x_buffer());
kernel1.SetArgument(2, static_cast<int>(x_offset));
kernel1.SetArgument(3, static_cast<int>(x_inc));
kernel1.SetArgument(4, temp_buffer());
// Event waiting list
auto eventWaitList = std::vector<Event>();
// Launches the main kernel
auto global1 = std::vector<size_t>{db_["WGS1"]*temp_size};
auto local1 = std::vector<size_t>{db_["WGS1"]};
auto kernelEvent = Event();
status = RunKernel(kernel1, global1, local1, kernelEvent.pointer());
if (ErrorIn(status)) { return status; }
eventWaitList.push_back(kernelEvent);
// Sets the arguments for the epilogue kernel
kernel2.SetArgument(0, temp_buffer());
kernel2.SetArgument(1, asum_buffer());
kernel2.SetArgument(2, static_cast<int>(asum_offset));
// Launches the epilogue kernel
auto global2 = std::vector<size_t>{db_["WGS2"]};
auto local2 = std::vector<size_t>{db_["WGS2"]};
status = RunKernel(kernel2, global2, local2, event_, eventWaitList);
if (ErrorIn(status)) { return status; }
// Successfully finished the computation
return StatusCode::kSuccess;
} catch (...) { return StatusCode::kInvalidKernel; }
}
// =================================================================================================
// Compiles the templated class
template class Xasum<float>;
template class Xasum<double>;
template class Xasum<float2>;
template class Xasum<double2>;
// =================================================================================================
} // namespace clblast

View file

@@ -69,6 +69,7 @@ StatusCode Xnrm2<T>::DoNrm2(const size_t n,
kernel1.SetArgument(2, static_cast<int>(x_offset));
kernel1.SetArgument(3, static_cast<int>(x_inc));
kernel1.SetArgument(4, temp_buffer());
// Event waiting list
auto eventWaitList = std::vector<Event>();

View file

@@ -153,6 +153,7 @@ void RunTests(int argc, char *argv[], const bool silent, const std::string &name
auto ap_offsets = std::vector<size_t>{args.ap_offset};
auto dot_offsets = std::vector<size_t>{args.dot_offset};
auto nrm2_offsets = std::vector<size_t>{args.nrm2_offset};
auto asum_offsets = std::vector<size_t>{args.asum_offset};
auto alphas = std::vector<U>{args.alpha};
auto betas = std::vector<U>{args.beta};
auto x_sizes = std::vector<size_t>{args.x_size};
@@ -193,6 +194,7 @@ void RunTests(int argc, char *argv[], const bool silent, const std::string &name
if (option == kArgAPOffset) { ap_offsets = tester.kOffsets; }
if (option == kArgDotOffset) { dot_offsets = tester.kOffsets; }
if (option == kArgNrm2Offset) { nrm2_offsets = tester.kOffsets; }
if (option == kArgAsumOffset) { asum_offsets = tester.kOffsets; }
if (option == kArgAlpha) { alphas = tester.kAlphaValues; }
if (option == kArgBeta) { betas = tester.kBetaValues; }
@@ -233,10 +235,12 @@ void RunTests(int argc, char *argv[], const bool silent, const std::string &name
for (auto &ap_offset: ap_offsets) { r_args.ap_offset = ap_offset;
for (auto &dot_offset: dot_offsets) { r_args.dot_offset = dot_offset;
for (auto &nrm2_offset: nrm2_offsets) { r_args.nrm2_offset = nrm2_offset;
for (auto &alpha: alphas) { r_args.alpha = alpha;
for (auto &beta: betas) { r_args.beta = beta;
C::SetSizes(r_args);
regular_test_vector.push_back(r_args);
for (auto &asum_offset: asum_offsets) { r_args.asum_offset = asum_offset;
for (auto &alpha: alphas) { r_args.alpha = alpha;
for (auto &beta: betas) { r_args.beta = beta;
C::SetSizes(r_args);
regular_test_vector.push_back(r_args);
}
}
}
}

View file

@@ -80,8 +80,10 @@ Arguments<U> Client<T,U>::ParseArguments(int argc, char *argv[], const GetMetric
if (o == kArgCOffset) { args.c_offset = GetArgument(argc, argv, help, kArgCOffset, size_t{0}); }
if (o == kArgAPOffset) { args.ap_offset= GetArgument(argc, argv, help, kArgAPOffset, size_t{0}); }
// Dot arguments
// Scalar result arguments
if (o == kArgDotOffset) { args.dot_offset = GetArgument(argc, argv, help, kArgDotOffset, size_t{0}); }
if (o == kArgNrm2Offset) { args.nrm2_offset = GetArgument(argc, argv, help, kArgNrm2Offset, size_t{0}); }
if (o == kArgAsumOffset) { args.asum_offset = GetArgument(argc, argv, help, kArgAsumOffset, size_t{0}); }
// Scalar values
if (o == kArgAlpha) { args.alpha = GetArgument(argc, argv, help, kArgAlpha, GetScalar<U>()); }
@@ -292,6 +294,7 @@ void Client<T,U>::PrintTableRow(const Arguments<U>& args,
else if (o == kArgAPOffset) { integers.push_back(args.ap_offset); }
else if (o == kArgDotOffset) {integers.push_back(args.dot_offset); }
else if (o == kArgNrm2Offset){integers.push_back(args.nrm2_offset); }
else if (o == kArgAsumOffset){integers.push_back(args.asum_offset); }
}
auto strings = std::vector<std::string>{};
for (auto &o: options_) {

View file

@@ -0,0 +1,139 @@
// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
// This file implements a class with static methods to describe the Xasum routine. Examples of
// such 'descriptions' are how to calculate the size of a buffer or how to run the routine. These
// static methods are used by the correctness tester and the performance tester.
//
// =================================================================================================
#ifndef CLBLAST_TEST_ROUTINES_XASUM_H_
#define CLBLAST_TEST_ROUTINES_XASUM_H_
#include <vector>
#include <string>
#ifdef CLBLAST_REF_CLBLAS
#include "wrapper_clblas.h"
#endif
#ifdef CLBLAST_REF_CBLAS
#include "wrapper_cblas.h"
#endif
namespace clblast {
// =================================================================================================
// See comment at top of file for a description of the class
template <typename T>
class TestXasum {
public:
// The BLAS level: 1, 2, or 3
static size_t BLASLevel() { return 1; }
// The list of arguments relevant for this routine
static std::vector<std::string> GetOptions() {
return {kArgN,
kArgXInc,
kArgXOffset, kArgAsumOffset};
}
// Describes how to obtain the sizes of the buffers
static size_t GetSizeX(const Arguments<T> &args) {
return args.n * args.x_inc + args.x_offset;
}
static size_t GetSizeAsum(const Arguments<T> &args) {
return 1 + args.asum_offset;
}
// Describes how to set the sizes of all the buffers
static void SetSizes(Arguments<T> &args) {
args.x_size = GetSizeX(args);
args.scalar_size = GetSizeAsum(args);
}
// Describes what the default values of the leading dimensions of the matrices are
static size_t DefaultLDA(const Arguments<T> &) { return 1; } // N/A for this routine
static size_t DefaultLDB(const Arguments<T> &) { return 1; } // N/A for this routine
static size_t DefaultLDC(const Arguments<T> &) { return 1; } // N/A for this routine
// Describes which transpose options are relevant for this routine
using Transposes = std::vector<Transpose>;
static Transposes GetATransposes(const Transposes &) { return {}; } // N/A for this routine
static Transposes GetBTransposes(const Transposes &) { return {}; } // N/A for this routine
// Describes how to run the CLBlast routine
static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = Asum<T>(args.n,
buffers.scalar(), args.asum_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
&queue_plain, &event);
clWaitForEvents(1, &event);
return status;
}
// Describes how to run the clBLAS routine (for correctness/performance comparison)
#ifdef CLBLAST_REF_CLBLAS
static StatusCode RunReference1(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
auto queue_plain = queue();
auto event = cl_event{};
auto status = clblasXasum<T>(args.n,
buffers.scalar(), args.asum_offset,
buffers.x_vec(), args.x_offset, args.x_inc,
1, &queue_plain, 0, nullptr, &event);
clWaitForEvents(1, &event);
return static_cast<StatusCode>(status);
}
#endif
// Describes how to run the CPU BLAS routine (for correctness/performance comparison)
#ifdef CLBLAST_REF_CBLAS
static StatusCode RunReference2(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
std::vector<T> scalar_cpu(args.scalar_size, static_cast<T>(0));
std::vector<T> x_vec_cpu(args.x_size, static_cast<T>(0));
buffers.scalar.Read(queue, args.scalar_size, scalar_cpu);
buffers.x_vec.Read(queue, args.x_size, x_vec_cpu);
cblasXasum(args.n,
scalar_cpu, args.asum_offset,
x_vec_cpu, args.x_offset, args.x_inc);
buffers.scalar.Write(queue, args.scalar_size, scalar_cpu);
return StatusCode::kSuccess;
}
#endif
// Describes how to download the results of the computation (more importantly: which buffer)
static std::vector<T> DownloadResult(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
std::vector<T> result(args.scalar_size, static_cast<T>(0));
buffers.scalar.Read(queue, args.scalar_size, result);
return result;
}
// Describes how to compute the indices of the result buffer
static size_t ResultID1(const Arguments<T> &) { return 1; } // N/A for this routine
static size_t ResultID2(const Arguments<T> &) { return 1; } // N/A for this routine
static size_t GetResultIndex(const Arguments<T> &args, const size_t, const size_t) {
return args.asum_offset;
}
// Describes how to compute performance metrics
static size_t GetFlops(const Arguments<T> &args) {
return args.n;
}
static size_t GetBytes(const Arguments<T> &args) {
return ((args.n) + 1) * sizeof(T);
}
};
// =================================================================================================
} // namespace clblast
// CLBLAST_TEST_ROUTINES_XASUM_H_
#endif
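
For reference, the size helpers in this test header can be checked with a small worked example (illustrative values, not taken from this commit):

#include <cstddef>

constexpr size_t n = 100, x_inc = 2, x_offset = 5, asum_offset = 3;
constexpr size_t x_size = n * x_inc + x_offset;  // 205 elements, as GetSizeX computes
constexpr size_t scalar_size = 1 + asum_offset;  // 4 elements, as GetSizeAsum computes
// GetBytes then reports (n + 1) * sizeof(T): the n reads of X plus the single result write.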