llama.cpp/examples/quantize/quantize.cpp

#include "ggml.h"
#include "llama.h"
#include <cstdio>
#include <cstdlib> // atoi
#include <string>
// usage:
//  ./quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type [nthread]
//
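// for example, assuming LLAMA_FTYPE_MOSTLY_Q4_0 evaluates to 2 (as in the
// llama.h of this era; run the binary with no arguments to print the current
// numeric values) and using 8 worker threads:
//  ./quantize models/llama/ggml-model-f16.bin models/llama/ggml-model-q4_0.bin 2 8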
int main(int argc, char ** argv) {
    ggml_time_init();

    if (argc < 4) {
        fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type [nthread]\n", argv[0]);
        fprintf(stderr, "  type = %d - q4_0\n", LLAMA_FTYPE_MOSTLY_Q4_0);
        fprintf(stderr, "  type = %d - q4_1\n", LLAMA_FTYPE_MOSTLY_Q4_1);
        fprintf(stderr, "  type = %d - q4_2\n", LLAMA_FTYPE_MOSTLY_Q4_2);
        fprintf(stderr, "  type = %d - q4_3\n", LLAMA_FTYPE_MOSTLY_Q4_3);
        return 1;
    }
    // needed to initialize f16 tables
    {
        struct ggml_init_params params = { 0, NULL, false };
        struct ggml_context * ctx = ggml_init(params);
        ggml_free(ctx);
    }
    const std::string fname_inp = argv[1];
    const std::string fname_out = argv[2];

    const enum llama_ftype ftype = (enum llama_ftype)atoi(argv[3]);
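    // optional 4th argument: number of quantization threads; 0 is passed
    // through to llama_model_quantize, which then picks its own default
    // (in the llama.cpp sources of this era, 0 maps to hardware concurrency)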
    int nthread = argc > 4 ? atoi(argv[4]) : 0;
    const int64_t t_main_start_us = ggml_time_us();

    int64_t t_quantize_us = 0;

    // load and quantize the model
    {
        const int64_t t_start_us = ggml_time_us();

        if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), ftype, nthread)) {
            fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
            return 1;
        }

        t_quantize_us = ggml_time_us() - t_start_us;
    }
    // report timing
    {
        const int64_t t_main_end_us = ggml_time_us();

        printf("\n");
        printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0);
        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0);
    }

    return 0;
}