#include "common.h" #include "llama.h" #include "build-info.h" #include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data #endif int main(int argc, char ** argv) { gpt_params params; if (gpt_params_parse(argc, argv, params) == false) { return 1; } params.embedding = true; if (params.n_ctx > 2048) { fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified);" "expect poor results\n", __func__, params.n_ctx); } fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT); if (params.seed == LLAMA_DEFAULT_SEED) { params.seed = time(NULL); } fprintf(stderr, "%s: seed = %u\n", __func__, params.seed); std::mt19937 rng(params.seed); if (params.random_prompt) { params.prompt = gpt_random_prompt(rng); } llama_init_backend(params.numa); llama_model * model; llama_context * ctx; // load the model std::tie(model, ctx) = llama_init_from_gpt_params(params); if (model == NULL) { fprintf(stderr, "%s: error: unable to load model\n", __func__); return 1; } // print system information { fprintf(stderr, "\n"); fprintf(stderr, "system_info: n_threads = %d / %d | %s\n", params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info()); } int n_past = 0; // Add a space in front of the first character to match OG llama tokenizer behavior params.prompt.insert(0, 1, ' '); // tokenize the prompt auto embd_inp = ::llama_tokenize(ctx, params.prompt, true); if (params.verbose_prompt) { fprintf(stderr, "\n"); fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str()); fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size()); for (int i = 0; i < (int) embd_inp.size(); i++) { fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i])); } fprintf(stderr, "\n"); } if (params.embedding){ if (embd_inp.size() > 0) { if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return 1; } } const int n_embd = llama_n_embd(ctx); const auto embeddings = llama_get_embeddings(ctx); for (int i = 0; i < n_embd; i++) { printf("%f ", embeddings[i]); } printf("\n"); } llama_print_timings(ctx); llama_free(ctx); llama_free_model(model); return 0; }