Remove C++20 requirement (#257)

* Remove C++20 requirement

* Roll back C features not supported in VS2017
pull/260/head
Roland Rabien 2022-12-11 10:03:07 -08:00 committed by GitHub
parent 6ed786957e
commit e70d47baab
2 changed files with 25 additions and 30 deletions
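
The change is the same throughout: the C++ sources initialized struct ggml_init_params with designated initializers (.mem_size = ..., .mem_buffer = ...), which standard C++ only allows from C++20 on, and ggml.c initialized a local union with a brace initializer that, per the commit message, the VS2017 toolchain does not accept. Both are replaced with a plain declaration followed by member assignments. Below is a minimal, self-contained C++ sketch of that transformation; the struct init_params and the buffer are hypothetical stand-ins for illustration, not the real ggml API.

#include <cstddef>

// Hypothetical stand-in for ggml_init_params, for illustration only.
struct init_params {
    size_t mem_size;
    void * mem_buffer;
};

init_params make_params(size_t n, void * buf) {
    // Before this commit (designated initializers, needs C++20):
    //     init_params p = { .mem_size = n, .mem_buffer = buf };
    // After this commit (declare, then assign; compiles in the C++14 mode VS2017 supports):
    init_params p;
    p.mem_size   = n;
    p.mem_buffer = buf;
    return p;
}

int main() {
    static char buffer[1024];                       // illustrative scratch buffer
    init_params p = make_params(sizeof(buffer), buffer);
    return p.mem_size == sizeof(buffer) ? 0 : 1;    // trivial sanity check
}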

ggml.c (6 changed lines)

@@ -155,7 +155,8 @@ static inline float fp32_from_bits(uint32_t w) {
     union {
         uint32_t as_bits;
         float as_value;
-    } fp32 = { w };
+    } fp32;
+    fp32.as_bits = w;
     return fp32.as_value;
 }
@@ -163,7 +164,8 @@ static inline uint32_t fp32_to_bits(float f) {
     union {
         float as_value;
         uint32_t as_bits;
-    } fp32 = { f };
+    } fp32;
+    fp32.as_value = f;
     return fp32.as_bits;
 }
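
For reference, the two helpers touched above are the usual union-based bit casts between a float and its IEEE-754 bit pattern; the commit changes only how the union is initialized, not what the helpers return. The sketch below copies the post-commit helpers and adds an illustrative round trip in main(). Reading the other union member is well defined in C (the language of ggml.c); in strict C++ it is technically undefined, though the major compilers accept it, and std::memcpy would be the fully portable C++ alternative.

#include <cstdint>
#include <cstdio>

// As in ggml.c after this commit: write the raw bits, read them back as a float.
static inline float fp32_from_bits(uint32_t w) {
    union {
        uint32_t as_bits;
        float as_value;
    } fp32;
    fp32.as_bits = w;
    return fp32.as_value;
}

// The inverse: write the float, read back its raw bit pattern.
static inline uint32_t fp32_to_bits(float f) {
    union {
        float as_value;
        uint32_t as_bits;
    } fp32;
    fp32.as_value = f;
    return fp32.as_bits;
}

int main() {
    const float x = 3.5f;
    const uint32_t bits = fp32_to_bits(x);     // 0x40600000 for 3.5f
    std::printf("0x%08x -> %f\n", (unsigned) bits, (double) fp32_from_bits(bits));
    return 0;
}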

whisper.cpp

@@ -735,10 +735,9 @@ static bool whisper_model_load(const std::string & fname, whisper_context & wctx
     // create the ggml context
     {
-        struct ggml_init_params params = {
-            .mem_size   = wctx.buf_model->size(),
-            .mem_buffer = wctx.buf_model->data(),
-        };
+        struct ggml_init_params params;
+        params.mem_size   = wctx.buf_model->size();
+        params.mem_buffer = wctx.buf_model->data();
         model.ctx = ggml_init(params);
         if (!model.ctx) {
@@ -945,10 +944,9 @@ static bool whisper_model_load(const std::string & fname, whisper_context & wctx
     // create the ggml memory context
     {
-        struct ggml_init_params params = {
-            .mem_size   = wctx.buf_memory.size(),
-            .mem_buffer = wctx.buf_memory.data(),
-        };
+        struct ggml_init_params params;
+        params.mem_size   = wctx.buf_memory.size();
+        params.mem_buffer = wctx.buf_memory.data();
         model.ctx_mem = ggml_init(params);
         if (!model.ctx_mem) {
@@ -1097,10 +1095,9 @@ static bool whisper_encode(
     const int n_mels = hparams.n_mels;
     assert(mel_inp.n_mel == n_mels);
-    struct ggml_init_params params = {
-        .mem_size   = wctx.buf_compute.size(),
-        .mem_buffer = wctx.buf_compute.data(),
-    };
+    struct ggml_init_params params;
+    params.mem_size   = wctx.buf_compute.size();
+    params.mem_buffer = wctx.buf_compute.data();
     struct ggml_context * ctx0 = ggml_init(params);
@@ -1175,10 +1172,9 @@ static bool whisper_encode(
         // create separate context for each layer to reduce memory usage
-        struct ggml_init_params paramsL = {
-            .mem_size   = wctx.buf_compute_layer.size(),
-            .mem_buffer = wctx.buf_compute_layer.data(),
-        };
+        struct ggml_init_params paramsL;
+        paramsL.mem_size   = wctx.buf_compute_layer.size();
+        paramsL.mem_buffer = wctx.buf_compute_layer.data();
         struct ggml_context * ctxL = ggml_init(paramsL);
@@ -1512,10 +1508,9 @@ static bool whisper_decode(
     const int N = n_tokens;
     const int M = wctx.exp_n_audio_ctx > 0 ? wctx.exp_n_audio_ctx : hparams.n_audio_ctx;
-    struct ggml_init_params params = {
-        .mem_size   = wctx.buf_compute.size(),
-        .mem_buffer = wctx.buf_compute.data(),
-    };
+    struct ggml_init_params params;
+    params.mem_size   = wctx.buf_compute.size();
+    params.mem_buffer = wctx.buf_compute.data();
     struct ggml_context * ctx0 = ggml_init(params);
@@ -1538,10 +1533,9 @@ static bool whisper_decode(
     for (int il = 0; il < n_layer; ++il) {
         const auto & layer = model.layers_decoder[il];
-        struct ggml_init_params paramsL = {
-            .mem_size   = wctx.buf_compute_layer.size(),
-            .mem_buffer = wctx.buf_compute_layer.data(),
-        };
+        struct ggml_init_params paramsL;
+        paramsL.mem_size   = wctx.buf_compute_layer.size();
+        paramsL.mem_buffer = wctx.buf_compute_layer.data();
         struct ggml_context * ctxL = ggml_init(paramsL);
         struct ggml_cgraph gf = {};
@@ -2915,10 +2909,9 @@ int whisper_full_parallel(
         // create the ggml memory context
         {
-            struct ggml_init_params params = {
-                .mem_size   = ctxs[i].buf_memory.size(),
-                .mem_buffer = ctxs[i].buf_memory.data(),
-            };
+            struct ggml_init_params params;
+            params.mem_size   = ctxs[i].buf_memory.size();
+            params.mem_buffer = ctxs[i].buf_memory.data();
            model.ctx_mem = ggml_init(params);
            if (!model.ctx_mem) {