From 3173a62eb9f90b94fb3184131032c1c8b7aa8d86 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov <ggerganov@gmail.com>
Date: Sun, 16 Apr 2023 13:58:48 +0300
Subject: [PATCH] stdout : vertical align outputs for better readability

---
 convert.py |  5 +++--
 llama.cpp  | 14 +++++++-------
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/convert.py b/convert.py
index 4e28a45eb..7b9f043b2 100644
--- a/convert.py
+++ b/convert.py
@@ -951,8 +951,9 @@ class OutputFile:
         ndarrays = bounded_parallel_map(do_item, model.items(), concurrency=8)
 
         for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)):
-            size = ' x '.join(map(str, lazy_tensor.shape))
-            print(f"[{i+1}/{len(model)}] Writing tensor {name}, size {size}...")
+            size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape)
+            padi = len(str(len(model)))
+            print(f"[{i+1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type}")
             of.write_tensor_header(name, lazy_tensor.shape, lazy_tensor.data_type)
             ndarray.tofile(of.fout)
         of.fout.close()
diff --git a/llama.cpp b/llama.cpp
index a0d7e5137..a6429a4e7 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -262,12 +262,12 @@ static size_t checked_div(size_t a, size_t b) {
 }
 
 static std::string llama_format_tensor_shape(const std::vector<uint32_t> & ne) {
-    std::string ret = "[" + std::to_string(ne.at(0));
+    char buf[256];
+    snprintf(buf, sizeof(buf), "%5u", ne.at(0));
     for (size_t i = 1; i < ne.size(); i++) {
-        ret += " x " + std::to_string(ne.at(i));
+        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i));
     }
-    ret += "]";
-    return ret;
+    return buf;
 }
 
 static size_t llama_calc_tensor_size(const std::vector<uint32_t> & ne, enum ggml_type type) {
@@ -942,8 +942,8 @@ static void llama_model_load_internal(
     ml->ggml_ctx = ctx;
 
     model.tok_embeddings = ml->get_tensor("tok_embeddings.weight", {n_embd, n_vocab});
-    model.norm   = ml->get_tensor("norm.weight", {n_embd});
-    model.output = ml->get_tensor("output.weight", {n_embd, n_vocab});
+    model.norm           = ml->get_tensor("norm.weight",           {n_embd});
+    model.output         = ml->get_tensor("output.weight",         {n_embd, n_vocab});
 
     model.layers.resize(n_layer);
     for (uint32_t i = 0; i < n_layer; ++i) {
@@ -1570,7 +1570,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             tensor.data = read_data.addr;
             model_loader->load_data_for(tensor);
 
-            printf("[%zu/%zu] %36s - %s, type = %6s, ",
+            printf("[%4zu/%4zu] %36s - %16s, type = %6s, ",
                    ++idx, model_loader->tensors_map.tensors.size(),
                    tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(),
                    ggml_type_name(tensor.type));
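
For reference, the C++ half of the change leans on a snprintf-append idiom: each
dimension is printed with a fixed "%5u" width at the current end of the buffer
(buf + strlen(buf)), so tensor shapes of different ranks line up column for
column in the log. Below is a minimal self-contained sketch of that idiom;
format_shape, main, and the sample shapes are illustrative stand-ins, not code
from llama.cpp.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <string>
    #include <vector>

    // Pad every dimension to width 5 and join with " x ", mirroring the
    // rewritten llama_format_tensor_shape in the patch above.
    static std::string format_shape(const std::vector<uint32_t> & ne) {
        char buf[256];
        snprintf(buf, sizeof(buf), "%5u", ne.at(0));
        for (size_t i = 1; i < ne.size(); i++) {
            // append at the current end of buf; the remaining capacity shrinks
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i));
        }
        return buf; // implicit conversion to std::string
    }

    int main() {
        // "%16s" right-aligns the whole shape, as in the quantize printf
        printf("| size %16s |\n", format_shape({4096, 32000}).c_str());
        printf("| size %16s |\n", format_shape({4096}).c_str());
        return 0;
    }

The Python side achieves the same effect with a nested format spec:
padi = len(str(len(model))) computes the width of the largest index, and
f"{i+1:{padi}d}" pads every index to that width, so the "[i/total]" prefix
occupies the same number of columns on every line of the run.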