ggml : export symbols (#1155)

Georgi Gerganov 2023-04-24 22:18:25 +03:00 committed by GitHub
parent 0c5692345d
commit 8a0f8673ba

ggml.h (471 changed lines)

@@ -169,14 +169,27 @@
 //
 //
 
-#ifdef __cplusplus
-extern "C" {
+#ifdef GGML_SHARED
+#    if defined(_WIN32) && !defined(__MINGW32__)
+#        ifdef GGML_BUILD
+#            define GGML_API __declspec(dllexport)
+#        else
+#            define GGML_API __declspec(dllimport)
+#        endif
+#    else
+#        define GGML_API __attribute__ ((visibility ("default")))
+#    endif
+#else
+#    define GGML_API
 #endif
 
 #include <stdint.h>
 #include <stddef.h>
 #include <stdbool.h>
 
+#define GGML_FILE_MAGIC 0x67676d6c // "ggml"
+#define GGML_FILE_VERSION 1
+
 #define GGML_MAX_DIMS 4
 #define GGML_MAX_NODES 4096
 #define GGML_MAX_PARAMS 16
@@ -184,22 +197,25 @@ extern "C" {
 #define GGML_MAX_OPT 4
 #define GGML_DEFAULT_N_THREADS 4
 
-#ifdef __ARM_NEON
-// we use the built-in 16-bit float type
-typedef __fp16 ggml_fp16_t;
-#else
-typedef uint16_t ggml_fp16_t;
+#ifdef __cplusplus
+extern "C" {
 #endif
 
-// convert FP16 <-> FP32
-float ggml_fp16_to_fp32(ggml_fp16_t x);
-ggml_fp16_t ggml_fp32_to_fp16(float x);
+#ifdef __ARM_NEON
+// we use the built-in 16-bit float type
+typedef __fp16 ggml_fp16_t;
+#else
+typedef uint16_t ggml_fp16_t;
+#endif
 
-struct ggml_object;
-struct ggml_context;
+// convert FP16 <-> FP32
+GGML_API float ggml_fp16_to_fp32(ggml_fp16_t x);
+GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x);
 
-enum ggml_type {
-// explicitly numbered values are used in llama.cpp files
+struct ggml_object;
+struct ggml_context;
+
+enum ggml_type {
 GGML_TYPE_F32 = 0,
 GGML_TYPE_F16 = 1,
 GGML_TYPE_Q4_0 = 2,
@@ -211,10 +227,10 @@ enum ggml_type {
 GGML_TYPE_I16,
 GGML_TYPE_I32,
 GGML_TYPE_COUNT,
 };
 
 // available tensor operations:
 enum ggml_op {
 GGML_OP_NONE = 0,
 GGML_OP_DUP,
@@ -260,23 +276,23 @@ enum ggml_op {
 GGML_OP_MAP_BINARY,
 
 GGML_OP_COUNT,
 };
 
 // ggml object
 struct ggml_object {
 size_t offs;
 size_t size;
 
 struct ggml_object * next;
 
 char padding[8];
 };
 
 static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);
 
 // n-dimensional tensor
 struct ggml_tensor {
 enum ggml_type type;
 
 int n_dims;
@@ -306,10 +322,10 @@ struct ggml_tensor {
 void * data;
 char padding[8];
 };
 
 // computation graph
 struct ggml_cgraph {
 int n_nodes;
 int n_leafs;
 int n_threads;
@@ -325,76 +341,80 @@ struct ggml_cgraph {
 int perf_runs;
 int64_t perf_cycles;
 int64_t perf_time_us;
 };
 
 // scratch buffer
 struct ggml_scratch {
 size_t offs;
 size_t size;
 void * data;
 };
 
 struct ggml_init_params {
 // memory pool
 size_t mem_size; // bytes
 void * mem_buffer; // if NULL, memory will be allocated internally
 bool no_alloc; // don't allocate memory for the tensor data
 };
 
-void ggml_time_init(void); // call this once at the beginning of the program
-int64_t ggml_time_ms(void);
-int64_t ggml_time_us(void);
-int64_t ggml_cycles(void);
-int64_t ggml_cycles_per_ms(void);
-void ggml_print_object (const struct ggml_object * obj);
-void ggml_print_objects(const struct ggml_context * ctx);
-int64_t ggml_nelements(const struct ggml_tensor * tensor);
-size_t ggml_nbytes (const struct ggml_tensor * tensor);
-int ggml_blck_size (enum ggml_type type);
-size_t ggml_type_size (enum ggml_type type); // size in bytes for all elements in a block
-float ggml_type_sizef(enum ggml_type type); // ggml_type_size()/ggml_blck_size() as float
-const char * ggml_type_name(enum ggml_type type);
-size_t ggml_element_size(const struct ggml_tensor * tensor);
-bool ggml_is_quantized(enum ggml_type type);
-struct ggml_context * ggml_init(struct ggml_init_params params);
-void ggml_free(struct ggml_context * ctx);
-size_t ggml_used_mem(const struct ggml_context * ctx);
-size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch);
-struct ggml_tensor * ggml_new_tensor(
+// misc
+
+GGML_API void ggml_time_init(void); // call this once at the beginning of the program
+GGML_API int64_t ggml_time_ms(void);
+GGML_API int64_t ggml_time_us(void);
+GGML_API int64_t ggml_cycles(void);
+GGML_API int64_t ggml_cycles_per_ms(void);
+GGML_API void ggml_print_object (const struct ggml_object * obj);
+GGML_API void ggml_print_objects(const struct ggml_context * ctx);
+GGML_API int64_t ggml_nelements(const struct ggml_tensor * tensor);
+GGML_API size_t ggml_nbytes (const struct ggml_tensor * tensor);
+GGML_API int ggml_blck_size (enum ggml_type type);
+GGML_API size_t ggml_type_size (enum ggml_type type); // size in bytes for all elements in a block
+GGML_API float ggml_type_sizef(enum ggml_type type); // ggml_type_size()/ggml_blck_size() as float
+GGML_API const char * ggml_type_name(enum ggml_type type);
+GGML_API size_t ggml_element_size(const struct ggml_tensor * tensor);
+GGML_API bool ggml_is_quantized(enum ggml_type type);
+
+// main
+
+GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);
+GGML_API void ggml_free(struct ggml_context * ctx);
+GGML_API size_t ggml_used_mem(const struct ggml_context * ctx);
+GGML_API size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch);
+GGML_API struct ggml_tensor * ggml_new_tensor(
 struct ggml_context * ctx,
 enum ggml_type type,
 int n_dims,
 const int64_t *ne);
 
-struct ggml_tensor * ggml_new_tensor_1d(
+GGML_API struct ggml_tensor * ggml_new_tensor_1d(
 struct ggml_context * ctx,
 enum ggml_type type,
 int64_t ne0);
 
-struct ggml_tensor * ggml_new_tensor_2d(
+GGML_API struct ggml_tensor * ggml_new_tensor_2d(
 struct ggml_context * ctx,
 enum ggml_type type,
 int64_t ne0,
 int64_t ne1);
 
-struct ggml_tensor * ggml_new_tensor_3d(
+GGML_API struct ggml_tensor * ggml_new_tensor_3d(
 struct ggml_context * ctx,
 enum ggml_type type,
 int64_t ne0,
 int64_t ne1,
 int64_t ne2);
 
-struct ggml_tensor * ggml_new_tensor_4d(
+GGML_API struct ggml_tensor * ggml_new_tensor_4d(
 struct ggml_context * ctx,
 enum ggml_type type,
 int64_t ne0,
@@ -402,185 +422,184 @@ struct ggml_tensor * ggml_new_tensor_4d(
 int64_t ne2,
 int64_t ne3);
 
-struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
-struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);
+GGML_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
+GGML_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);
 
-struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src);
-struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src);
+GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src);
+GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src);
 
-struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
-struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
-struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);
+GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
+GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
+GGML_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);
 
-int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
-void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);
+GGML_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
+GGML_API void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);
 
-float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
-void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);
+GGML_API float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
+GGML_API void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);
 
-void * ggml_get_data (const struct ggml_tensor * tensor);
-float * ggml_get_data_f32(const struct ggml_tensor * tensor);
+GGML_API void * ggml_get_data (const struct ggml_tensor * tensor);
+GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor);
 
 //
 // operations on tensors with backpropagation
 //
 
-struct ggml_tensor * ggml_dup(
+GGML_API struct ggml_tensor * ggml_dup(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
-struct ggml_tensor * ggml_add(
+GGML_API struct ggml_tensor * ggml_add(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b);
 
-struct ggml_tensor * ggml_add_inplace(
+GGML_API struct ggml_tensor * ggml_add_inplace(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b);
 
-struct ggml_tensor * ggml_sub(
+GGML_API struct ggml_tensor * ggml_sub(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b);
 
-struct ggml_tensor * ggml_mul(
+GGML_API struct ggml_tensor * ggml_mul(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b);
 
-struct ggml_tensor * ggml_div(
+GGML_API struct ggml_tensor * ggml_div(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b);
 
-struct ggml_tensor * ggml_sqr(
+GGML_API struct ggml_tensor * ggml_sqr(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
-struct ggml_tensor * ggml_sqrt(
+GGML_API struct ggml_tensor * ggml_sqrt(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
 // return scalar
 // TODO: compute sum along rows
-struct ggml_tensor * ggml_sum(
+GGML_API struct ggml_tensor * ggml_sum(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
 // mean along rows
-struct ggml_tensor * ggml_mean(
+GGML_API struct ggml_tensor * ggml_mean(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
 // if a is the same shape as b, and a is not parameter, return a
 // otherwise, return a new tensor: repeat(a) to fit in b
-struct ggml_tensor * ggml_repeat(
+GGML_API struct ggml_tensor * ggml_repeat(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b);
 
-struct ggml_tensor * ggml_abs(
+GGML_API struct ggml_tensor * ggml_abs(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
-struct ggml_tensor * ggml_sgn(
+GGML_API struct ggml_tensor * ggml_sgn(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
-struct ggml_tensor * ggml_neg(
+GGML_API struct ggml_tensor * ggml_neg(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
-struct ggml_tensor * ggml_step(
+GGML_API struct ggml_tensor * ggml_step(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
-struct ggml_tensor * ggml_relu(
+GGML_API struct ggml_tensor * ggml_relu(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
 // TODO: double-check this computation is correct
-struct ggml_tensor * ggml_gelu(
+GGML_API struct ggml_tensor * ggml_gelu(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
-struct ggml_tensor * ggml_silu(
+GGML_API struct ggml_tensor * ggml_silu(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
 // normalize along rows
 // TODO: eps is hardcoded to 1e-5 for now
-struct ggml_tensor * ggml_norm(
+GGML_API struct ggml_tensor * ggml_norm(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
-struct ggml_tensor * ggml_rms_norm(
+GGML_API struct ggml_tensor * ggml_rms_norm(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
 // A: m rows, n columns
 // B: p rows, n columns (i.e. we transpose it internally)
 // result is m columns, p rows
-struct ggml_tensor * ggml_mul_mat(
+GGML_API struct ggml_tensor * ggml_mul_mat(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b);
 
 //
 // operations on tensors without backpropagation
 //
 
 // in-place, returns view(a)
-struct ggml_tensor * ggml_scale(
+GGML_API struct ggml_tensor * ggml_scale(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b);
 
 // a -> b, return view(b)
-struct ggml_tensor * ggml_cpy(
+GGML_API struct ggml_tensor * ggml_cpy(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b);
 
 // make contiguous
-struct ggml_tensor * ggml_cont(
+GGML_API struct ggml_tensor * ggml_cont(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
 // return view(a), b specifies the new shape
 // TODO: when we start computing gradient, make a copy instead of view
-struct ggml_tensor * ggml_reshape(
+GGML_API struct ggml_tensor * ggml_reshape(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b);
 
 // return view(a)
 // TODO: when we start computing gradient, make a copy instead of view
-struct ggml_tensor * ggml_reshape_2d(
+GGML_API struct ggml_tensor * ggml_reshape_2d(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 int64_t ne0,
 int64_t ne1);
 
 // return view(a)
 // TODO: when we start computing gradient, make a copy instead of view
-struct ggml_tensor * ggml_reshape_3d(
+GGML_API struct ggml_tensor * ggml_reshape_3d(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 int64_t ne0,
 int64_t ne1,
 int64_t ne2);
 
 // offset in bytes
-struct ggml_tensor * ggml_view_1d(
+GGML_API struct ggml_tensor * ggml_view_1d(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 int64_t ne0,
 size_t offset);
 
-struct ggml_tensor * ggml_view_2d(
+GGML_API struct ggml_tensor * ggml_view_2d(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 int64_t ne0,
@@ -588,7 +607,7 @@ struct ggml_tensor * ggml_view_2d(
 size_t nb1, // row stride in bytes
 size_t offset);
 
-struct ggml_tensor * ggml_view_3d(
+GGML_API struct ggml_tensor * ggml_view_3d(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 int64_t ne0,
@@ -598,7 +617,7 @@ struct ggml_tensor * ggml_view_3d(
 size_t nb2, // slice stride in bytes
 size_t offset);
 
-struct ggml_tensor * ggml_permute(
+GGML_API struct ggml_tensor * ggml_permute(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 int axis0,
@@ -606,62 +625,62 @@ struct ggml_tensor * ggml_permute(
 int axis2,
 int axis3);
 
 // alias for ggml_permute(ctx, a, 1, 0, 2, 3)
-struct ggml_tensor * ggml_transpose(
+GGML_API struct ggml_tensor * ggml_transpose(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
-struct ggml_tensor * ggml_get_rows(
+GGML_API struct ggml_tensor * ggml_get_rows(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b);
 
 // set elements above the diagonal to -INF
 // in-place, returns view(a)
-struct ggml_tensor * ggml_diag_mask_inf(
+GGML_API struct ggml_tensor * ggml_diag_mask_inf(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 int n_past);
 
 // in-place, returns view(a)
-struct ggml_tensor * ggml_soft_max(
+GGML_API struct ggml_tensor * ggml_soft_max(
 struct ggml_context * ctx,
 struct ggml_tensor * a);
 
 // rotary position embedding
 // in-place, returns view(a)
 // if mode & 1 == 1, skip n_past elements
 // if mode & 2 == 1, GPT-NeoX style
 // TODO: avoid creating a new tensor every time
-struct ggml_tensor * ggml_rope(
+GGML_API struct ggml_tensor * ggml_rope(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 int n_past,
 int n_dims,
 int mode);
 
 // padding = 1
 // TODO: we don't support extra parameters for now
 // that's why we are hard-coding the stride, padding, and dilation
 // not great ..
-struct ggml_tensor * ggml_conv_1d_1s(
+GGML_API struct ggml_tensor * ggml_conv_1d_1s(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b);
 
-struct ggml_tensor * ggml_conv_1d_2s(
+GGML_API struct ggml_tensor * ggml_conv_1d_2s(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b);
 
-struct ggml_tensor * ggml_flash_attn(
+GGML_API struct ggml_tensor * ggml_flash_attn(
 struct ggml_context * ctx,
 struct ggml_tensor * q,
 struct ggml_tensor * k,
 struct ggml_tensor * v,
 bool masked);
 
-struct ggml_tensor * ggml_flash_ff(
+GGML_API struct ggml_tensor * ggml_flash_ff(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b0,
@@ -669,64 +688,64 @@ struct ggml_tensor * ggml_flash_ff(
 struct ggml_tensor * c0,
 struct ggml_tensor * c1);
 
 // Mapping operations
-typedef void (*ggml_unary_op_f32_t)(const int, float *, const float *);
-typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *);
+GGML_API typedef void (*ggml_unary_op_f32_t)(const int, float *, const float *);
+GGML_API typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *);
 
-struct ggml_tensor * ggml_map_unary_f32(
+GGML_API struct ggml_tensor * ggml_map_unary_f32(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 const ggml_unary_op_f32_t fun);
 
-struct ggml_tensor * ggml_map_binary_f32(
+GGML_API struct ggml_tensor * ggml_map_binary_f32(
 struct ggml_context * ctx,
 struct ggml_tensor * a,
 struct ggml_tensor * b,
 const ggml_binary_op_f32_t fun);
 
 //
 // automatic differentiation
 //
 
-void ggml_set_param(
+GGML_API void ggml_set_param(
 struct ggml_context * ctx,
 struct ggml_tensor * tensor);
 
-void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
+GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
 
-struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor);
-struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);
+GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor);
+GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);
 
-void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph);
-void ggml_graph_reset (struct ggml_cgraph * cgraph);
+GGML_API void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph);
+GGML_API void ggml_graph_reset (struct ggml_cgraph * cgraph);
 
 // print info and performance information for the graph
-void ggml_graph_print(const struct ggml_cgraph * cgraph);
+GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph);
 
 // dump the graph into a file using the dot format
-void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);
+GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);
 
 //
 // optimization
 //
 
 // optimization methods
 enum ggml_opt_type {
 GGML_OPT_ADAM,
 GGML_OPT_LBFGS,
 };
 
 // linesearch methods
 enum ggml_linesearch {
 GGML_LINESEARCH_DEFAULT = 1,
 GGML_LINESEARCH_BACKTRACKING_ARMIJO = 0,
 GGML_LINESEARCH_BACKTRACKING_WOLFE = 1,
 GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2,
 };
 
 // optimization return values
 enum ggml_opt_result {
 GGML_OPT_OK = 0,
 GGML_OPT_DID_NOT_CONVERGE,
 GGML_OPT_NO_CONTEXT,
@@ -738,13 +757,13 @@ enum ggml_opt_result {
 GGML_LINESEARCH_MAXIMUM_STEP,
 GGML_LINESEARCH_MAXIMUM_ITERATIONS,
 GGML_LINESEARCH_INVALID_PARAMETERS,
 };
 
 // optimization parameters
 //
 // see ggml.c (ggml_opt_default_params) for default values
 //
 struct ggml_opt_params {
 enum ggml_opt_type type;
 
 int n_threads;
@@ -795,71 +814,71 @@ struct ggml_opt_params {
 enum ggml_linesearch linesearch;
 } lbfgs;
 };
 
-struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);
+GGML_API struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);
 
 // optimize the function defined by the tensor f
-enum ggml_opt_result ggml_opt(
+GGML_API enum ggml_opt_result ggml_opt(
 struct ggml_context * ctx,
 struct ggml_opt_params params,
 struct ggml_tensor * f);
 
 //
 // quantization
 //
 
-size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist);
-size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist);
-size_t ggml_quantize_q4_2(const float * src, void * dst, int n, int k, int64_t * hist);
-size_t ggml_quantize_q4_3(const float * src, void * dst, int n, int k, int64_t * hist);
+GGML_API size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist);
+GGML_API size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist);
+GGML_API size_t ggml_quantize_q4_2(const float * src, void * dst, int n, int k, int64_t * hist);
+GGML_API size_t ggml_quantize_q4_3(const float * src, void * dst, int n, int k, int64_t * hist);
 
-size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist);
+GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist);
 
 //
 // system info
 //
 
-int ggml_cpu_has_avx(void);
-int ggml_cpu_has_avx2(void);
-int ggml_cpu_has_avx512(void);
-int ggml_cpu_has_avx512_vbmi(void);
-int ggml_cpu_has_avx512_vnni(void);
-int ggml_cpu_has_fma(void);
-int ggml_cpu_has_neon(void);
-int ggml_cpu_has_arm_fma(void);
-int ggml_cpu_has_f16c(void);
-int ggml_cpu_has_fp16_va(void);
-int ggml_cpu_has_wasm_simd(void);
-int ggml_cpu_has_blas(void);
-int ggml_cpu_has_cublas(void);
-int ggml_cpu_has_sse3(void);
-int ggml_cpu_has_vsx(void);
+GGML_API int ggml_cpu_has_avx (void);
+GGML_API int ggml_cpu_has_avx2 (void);
+GGML_API int ggml_cpu_has_avx512 (void);
+GGML_API int ggml_cpu_has_avx512_vbmi(void);
+GGML_API int ggml_cpu_has_avx512_vnni(void);
+GGML_API int ggml_cpu_has_fma (void);
+GGML_API int ggml_cpu_has_neon (void);
+GGML_API int ggml_cpu_has_arm_fma (void);
+GGML_API int ggml_cpu_has_f16c (void);
+GGML_API int ggml_cpu_has_fp16_va (void);
+GGML_API int ggml_cpu_has_wasm_simd (void);
+GGML_API int ggml_cpu_has_blas (void);
+GGML_API int ggml_cpu_has_cublas (void);
+GGML_API int ggml_cpu_has_sse3 (void);
+GGML_API int ggml_cpu_has_vsx (void);
 
 //
 // Internal types and functions exposed for tests and benchmarks
 //
 
 #ifdef __cplusplus
 // restrict not standard in C++
 #define GGML_RESTRICT
 #else
 #define GGML_RESTRICT restrict
 #endif
 typedef void (*dequantize_row_q_t)(const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int k);
-typedef void (*quantize_row_q_t)(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int k);
-typedef void (*vec_dot_q_t)(const int n, float * GGML_RESTRICT s, const void * GGML_RESTRICT x, const void * GGML_RESTRICT y);
+typedef void (*quantize_row_q_t) (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int k);
+typedef void (*vec_dot_q_t) (const int n, float * GGML_RESTRICT s, const void * GGML_RESTRICT x, const void * GGML_RESTRICT y);
 
 typedef struct {
 dequantize_row_q_t dequantize_row_q;
 quantize_row_q_t quantize_row_q;
 quantize_row_q_t quantize_row_q_reference;
 quantize_row_q_t quantize_row_q_dot;
 vec_dot_q_t vec_dot_q;
 } quantize_fns_t;
 
 quantize_fns_t ggml_internal_get_quantize_fn(size_t i);
 
 #ifdef __cplusplus
 }
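
For reference, a minimal consumer sketch of how the new export macro is intended to be used. This is not part of the commit: it assumes ggml is compiled into a shared library with GGML_SHARED defined and GGML_BUILD defined only while building ggml itself, and that the consuming program is compiled with GGML_SHARED as well, so GGML_API resolves to __declspec(dllexport)/__declspec(dllimport) on MSVC and to default symbol visibility elsewhere. Only the macro names and the ggml functions below come from the diff above; the build flags and file layout are assumptions.

/*
 * Hypothetical consumer of the shared ggml library (not part of this commit).
 * Assumed build: libggml built with -DGGML_SHARED -DGGML_BUILD; this program
 * built with -DGGML_SHARED and linked against that library, so every GGML_API
 * symbol below is imported from the shared object/DLL.
 */
#include <stdio.h>

#include "ggml.h"

int main(void) {
    struct ggml_init_params params = {
        /* .mem_size   = */ 16*1024*1024, // 16 MB memory pool
        /* .mem_buffer = */ NULL,         // let ggml allocate the pool
        /* .no_alloc   = */ false,
    };

    // all of these are GGML_API functions exported by the shared library
    struct ggml_context * ctx = ggml_init(params);

    struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    ggml_set_f32(t, 1.0f);

    printf("nelements = %lld, nbytes = %zu, AVX = %d\n",
           (long long) ggml_nelements(t), ggml_nbytes(t), ggml_cpu_has_avx());

    ggml_free(ctx);
    return 0;
}

On non-Windows targets the visibility("default") branch matters mainly when the library is otherwise compiled with hidden symbol visibility; with GGML_SHARED undefined, GGML_API expands to nothing and the header behaves exactly as before for static builds.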