diff --git a/convert-baichuan-hf-to-gguf.py b/convert-baichuan-hf-to-gguf.py
index a1783f71f..3b64ecb88 100755
--- a/convert-baichuan-hf-to-gguf.py
+++ b/convert-baichuan-hf-to-gguf.py
@@ -230,7 +230,7 @@ gguf_writer.add_token_list(tokens)
 gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)
 
-special_vocab = gguf.SpecialVocab(dir_model)
+special_vocab = gguf.SpecialVocab(dir_model, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)
 
 # TENSORS
diff --git a/convert-bloom-hf-to-gguf.py b/convert-bloom-hf-to-gguf.py
index 7bfc95ec1..14dbd793c 100755
--- a/convert-bloom-hf-to-gguf.py
+++ b/convert-bloom-hf-to-gguf.py
@@ -129,7 +129,7 @@ gguf_writer.add_token_list(tokens)
 gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)
 
-special_vocab = gguf.SpecialVocab(dir_model, load_merges=True)
+special_vocab = gguf.SpecialVocab(dir_model, load_merges=True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)
 
 # TENSORS
diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py
index 1d98c51ad..8e8f3c3f8 100755
--- a/convert-falcon-hf-to-gguf.py
+++ b/convert-falcon-hf-to-gguf.py
@@ -152,7 +152,7 @@ gguf_writer.add_token_list(tokens)
 gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)
 
-special_vocab = gguf.SpecialVocab(dir_model, load_merges = True)
+special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)
 
 # TENSORS
diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py
index d4e85f518..f1599b0c4 100755
--- a/convert-gptneox-hf-to-gguf.py
+++ b/convert-gptneox-hf-to-gguf.py
@@ -134,7 +134,7 @@ gguf_writer.add_token_list(tokens)
 gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)
 
-special_vocab = gguf.SpecialVocab(dir_model, load_merges = True)
+special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)
 
 # TENSORS
diff --git a/convert-llama-ggml-to-gguf.py b/convert-llama-ggml-to-gguf.py
index b5d3e0b3c..871add64d 100755
--- a/convert-llama-ggml-to-gguf.py
+++ b/convert-llama-ggml-to-gguf.py
@@ -388,7 +388,9 @@ def handle_metadata(cfg, hp):
         cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir,
         cfg.vocabtype )
     # FIXME: Respect cfg.vocab_dir?
-    svocab = gguf.SpecialVocab(cfg.model_metadata_dir)
+    svocab = gguf.SpecialVocab(cfg.model_metadata_dir,
+        load_merges = cfg.vocabtype == 'bpe',
+        n_vocab = vocab.vocab_size)
     convert.check_vocab_size(params, vocab)
     return (params, vocab, svocab)
 
diff --git a/convert-mpt-hf-to-gguf.py b/convert-mpt-hf-to-gguf.py
index 19a66820d..21b9fd507 100755
--- a/convert-mpt-hf-to-gguf.py
+++ b/convert-mpt-hf-to-gguf.py
@@ -139,7 +139,7 @@ gguf_writer.add_token_list(tokens)
 gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)
 
-special_vocab = gguf.SpecialVocab(dir_model, load_merges = True)
+special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)
 
 # TENSORS
diff --git a/convert-refact-hf-to-gguf.py b/convert-refact-hf-to-gguf.py
index bfeabc082..934f3852b 100755
--- a/convert-refact-hf-to-gguf.py
+++ b/convert-refact-hf-to-gguf.py
@@ -150,7 +150,7 @@ gguf_writer.add_token_list(tokens)
 gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)
 
-special_vocab = gguf.SpecialVocab(dir_model, load_merges=True)
+special_vocab = gguf.SpecialVocab(dir_model, load_merges=True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)
 
 # TENSORS
diff --git a/convert-starcoder-hf-to-gguf.py b/convert-starcoder-hf-to-gguf.py
index 90fa0c32f..fe8815cbf 100755
--- a/convert-starcoder-hf-to-gguf.py
+++ b/convert-starcoder-hf-to-gguf.py
@@ -122,7 +122,7 @@ gguf_writer.add_token_list(tokens)
 gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)
 
-special_vocab = gguf.SpecialVocab(dir_model, load_merges = True)
+special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)
 
 # TENSORS
diff --git a/convert.py b/convert.py
index 24da25efc..0680f71ea 100755
--- a/convert.py
+++ b/convert.py
@@ -369,7 +369,7 @@ class SentencePieceVocab:
         expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
         actual_ids = sorted(added_tokens.values())
         if expected_ids != actual_ids:
-            raise Exception(f"Expected added token IDs to be sequential and start at {len(added_tokens)}; got {actual_ids}")
+            raise Exception(f"Expected added token IDs to be sequential and start at {vocab_size}; got {actual_ids}")
 
         items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
         self.added_tokens_list = [text for (text, idx) in items]
@@ -1163,10 +1163,13 @@ def main(args_in: list[str] | None = None) -> None:
 
     vocab: Vocab
    if args.vocab_only:
-        assert args.outfile, "need --outfile if using --vocab-only"
+        if not args.outfile:
+            raise ValueError("need --outfile if using --vocab-only")
         # FIXME: Try to respect vocab_dir somehow?
         vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype)
-        special_vocab = gguf.SpecialVocab(model_plus.paths[0].parent, load_merges = args.vocabtype == 'bpe')
+        special_vocab = gguf.SpecialVocab(model_plus.paths[0].parent,
+            load_merges = args.vocabtype == 'bpe',
+            n_vocab = vocab.vocab_size)
         outfile = args.outfile
         OutputFile.write_vocab_only(outfile, params, vocab, special_vocab)
         print(f"Wrote {outfile}")
@@ -1178,7 +1181,9 @@ def main(args_in: list[str] | None = None) -> None:
         vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent
         vocab = load_vocab(vocab_dir, args.vocabtype)
     # FIXME: Try to respect vocab_dir somehow?
-    special_vocab = gguf.SpecialVocab(model_plus.paths[0].parent, load_merges = args.vocabtype == 'bpe')
+    special_vocab = gguf.SpecialVocab(model_plus.paths[0].parent,
+        load_merges = args.vocabtype == 'bpe',
+        n_vocab = vocab.vocab_size)
 
     model = model_plus.model
     model = convert_model_names(model, params)
diff --git a/gguf-py/gguf/gguf.py b/gguf-py/gguf/gguf.py
index 072c839c4..6b7d65429 100644
--- a/gguf-py/gguf/gguf.py
+++ b/gguf-py/gguf/gguf.py
@@ -987,12 +987,15 @@ class SpecialVocab:
     merges: list[str] = []
     special_token_types: tuple[str, ...] = ('bos', 'eos', 'unk', 'sep', 'pad')
     special_token_ids: dict[str, int] = {}
+    n_vocab: int | None = None
 
     def __init__(
         self, path: str | os.PathLike[str], load_merges: bool = False,
         special_token_types: tuple[str, ...] | None = None,
+        n_vocab: int | None = None,
     ):
         self.special_token_ids = {}
+        self.n_vocab = n_vocab
         self.load_merges = load_merges
         if special_token_types is not None:
             self.special_token_types = special_token_types
@@ -1002,6 +1005,16 @@ class SpecialVocab:
         if not self._try_load_from_tokenizer_json(path):
             self._try_load_from_config_json(path)
 
+    def _set_special_token(self, typ: str, tid: Any):
+        if not isinstance(tid, int) or tid < 0:
+            return
+        if self.n_vocab is None or tid < self.n_vocab:
+            self.special_token_ids[typ] = tid
+            return
+        print(f'gguf: WARNING: Special token type {typ}, id {tid} out of range, must be under {self.n_vocab} - skipping',
+            file = sys.stderr)
+
+
     def _try_load_from_tokenizer_json(self, path: Path) -> bool:
         tokenizer_file = path / 'tokenizer.json'
         if not tokenizer_file.is_file():
@@ -1029,10 +1042,11 @@ class SpecialVocab:
                 tc_content = entry_content
             else:
                 continue
-            for maybe_token_id in (atok.get('id') for atok in added_tokens if atok.get('content') == tc_content):
-                if isinstance(maybe_token_id, int) and maybe_token_id >= 0:
-                    self.special_token_ids[typ] = maybe_token_id
-                break
+            # We only need the first match here.
+            maybe_token_id = next((
+                atok.get('id') for atok in added_tokens
+                if atok.get('content') == tc_content), None)
+            self._set_special_token(typ, maybe_token_id)
         return True
 
     def _try_load_from_config_json(self, path: Path) -> bool:
@@ -1042,21 +1056,21 @@
         with open(config_file, encoding = 'utf-8') as f:
             config = json.load(f)
         for typ in self.special_token_types:
-            maybe_token_id = config.get(f'{typ}_token_id')
-            if isinstance(maybe_token_id, int) and maybe_token_id >= 0:
-                self.special_token_ids[typ] = maybe_token_id
+            self._set_special_token(typ, config.get(f'{typ}_token_id'))
         return True
 
-    def add_to_gguf(self, gw: GGUFWriter) -> None:
+    def add_to_gguf(self, gw: GGUFWriter, quiet: bool = False) -> None:
         if len(self.merges) > 0:
-            print(f'gguf: Adding {len(self.merges)} merge(s).')
+            if not quiet:
+                print(f'gguf: Adding {len(self.merges)} merge(s).')
             gw.add_token_merges(self.merges)
         for typ, tokid in self.special_token_ids.items():
             handler: Callable[[int], None] | None = getattr(gw, f'add_{typ}_token_id', None)
             if handler is None:
                 print(f'gguf: WARNING: No handler for special token type {typ} with id {tokid} - skipping', file = sys.stderr)
                 continue
-            print(f'gguf: Setting special token type {typ} to {tokid}')
+            if not quiet:
+                print(f'gguf: Setting special token type {typ} to {tokid}')
             handler(tokid)
 
     def __repr__(self) -> str:
diff --git a/llama.cpp b/llama.cpp
index 365349335..8d52eaf62 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2238,15 +2238,35 @@ static void llm_load_vocab(
     if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
         vocab.linefeed_id = llama_byte_to_token(vocab, '\n');
     } else {
-        vocab.linefeed_id = llama_tokenize_internal(vocab, "\u010A", false)[0];
+        const std::vector<int> ids = llama_tokenize_internal(vocab, "\u010A", false);
+        GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
+        vocab.linefeed_id = ids[0];
     }
 
     // special tokens
-    GGUF_GET_KEY(ctx, vocab.special_bos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_BOS_ID));
-    GGUF_GET_KEY(ctx, vocab.special_eos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_EOS_ID));
-    GGUF_GET_KEY(ctx, vocab.special_unk_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_UNK_ID));
-    GGUF_GET_KEY(ctx, vocab.special_sep_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_SEP_ID));
-    GGUF_GET_KEY(ctx, vocab.special_pad_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_PAD_ID));
+    {
+        const std::vector<std::pair<enum llm_kv, int32_t &>> special_token_types = {
+            { LLM_KV_TOKENIZER_BOS_ID, vocab.special_bos_id },
+            { LLM_KV_TOKENIZER_EOS_ID, vocab.special_eos_id },
+            { LLM_KV_TOKENIZER_UNK_ID, vocab.special_unk_id },
+            { LLM_KV_TOKENIZER_SEP_ID, vocab.special_sep_id },
+            { LLM_KV_TOKENIZER_PAD_ID, vocab.special_pad_id },
+        };
+        for (const auto & it : special_token_types) {
+            const std::string & key = kv(std::get<0>(it));
+            int32_t & id = std::get<1>(it), old_id = id;
+
+            GGUF_GET_KEY(ctx, id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, key);
+            // Must be >= -1 and < vocab size. Since the key is unsigned, -1
+            // can only come from the default value, so there's no point in
+            // validating that.
+            if (size_t(id + 1) > vocab.id_to_token.size()) {
+                LLAMA_LOG_WARN("%s: bad special token: '%s' = %d, using default id %d\n",
+                    __func__, key.c_str(), id, old_id);
+                id = old_id;
+            }
+        }
+    }
 
     // build special tokens cache
     {
@@ -6103,11 +6123,10 @@ static uint8_t llama_token_to_byte(const llama_vocab& vocab, llama_token id) {
 }
 
 static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) {
+    static const char * hex = "0123456789ABCDEF";
     switch (llama_vocab_get_type(vocab)) {
         case LLAMA_VOCAB_TYPE_SPM: {
-            char buf[7];
-            int result = snprintf(buf, sizeof(buf), "<0x%02X>", ch);
-            GGML_ASSERT(0 <= result && result < 7);
+            const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
             return vocab.token_to_id.at(buf);
         }
         case LLAMA_VOCAB_TYPE_BPE: {
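
For reference, the following is a minimal, self-contained sketch (not part of the patch) of the out-of-range guard this change adds to SpecialVocab in gguf-py/gguf/gguf.py. The standalone function name and the example ids are hypothetical; only the skip logic mirrors _set_special_token above.

# Illustrative sketch, assuming Python 3: a standalone mirror of the
# n_vocab range check performed by SpecialVocab._set_special_token.
import sys

def set_special_token(special_token_ids: dict, typ: str, tid, n_vocab=None) -> None:
    # Ignore missing or negative ids, as the patched helper does.
    if not isinstance(tid, int) or tid < 0:
        return
    # Accept the id only when it is below the vocabulary size (or no size is known).
    if n_vocab is None or tid < n_vocab:
        special_token_ids[typ] = tid
        return
    print(f'gguf: WARNING: Special token type {typ}, id {tid} out of range, '
          f'must be under {n_vocab} - skipping', file=sys.stderr)

ids: dict = {}
set_special_token(ids, 'bos', 1, n_vocab=32000)      # kept: 1 < 32000
set_special_token(ids, 'pad', 32000, n_vocab=32000)  # skipped: out of range
print(ids)  # -> {'bos': 1}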