From b220222a64ce760bfbec9c770f11db3ec6a6abb6 Mon Sep 17 00:00:00 2001
From: Miwa / Ensan <63481257+ensan-hcl@users.noreply.github.com>
Date: Sat, 2 Dec 2023 03:19:45 +0900
Subject: [PATCH] swift : fix token_to_piece implementation (#4278)

* Fix token_to_piece implementation in Swift

* Fix errors
---
 examples/batched.swift/Sources/main.swift | 10 +++------
 .../llama.cpp.swift/LibLlama.swift        | 22 +++++++++++++------
 2 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/examples/batched.swift/Sources/main.swift b/examples/batched.swift/Sources/main.swift
index ba15197ae..ce9d80d9b 100644
--- a/examples/batched.swift/Sources/main.swift
+++ b/examples/batched.swift/Sources/main.swift
@@ -230,18 +230,15 @@ private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String
     var result = [CChar](repeating: 0, count: 8)
     let nTokens = llama_token_to_piece(model, token, &result, Int32(result.count))
     if nTokens < 0 {
-        if result.count >= -Int(nTokens) {
-            result.removeLast(-Int(nTokens))
-        } else {
-            result.removeAll()
-        }
+        let actualTokensCount = -Int(nTokens)
+        result = .init(repeating: 0, count: actualTokensCount)
         let check = llama_token_to_piece(
             model,
             token,
             &result,
             Int32(result.count)
         )
-        assert(check == nTokens)
+        assert(check == actualTokensCount)
     } else {
         result.removeLast(result.count - Int(nTokens))
     }
@@ -259,5 +256,4 @@ private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String
         buffer = []
         return bufferString
     }
-    return nil
 }
diff --git a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
index aaef09611..09b36d9e6 100644
--- a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
+++ b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
@@ -164,13 +164,21 @@ actor LlamaContext {
     private func token_to_piece(token: llama_token) -> String {
         let result = UnsafeMutablePointer<Int8>.allocate(capacity: 8)
         result.initialize(repeating: Int8(0), count: 8)
+        defer {
+            result.deallocate()
+        }
+        let nTokens = llama_token_to_piece(model, token, result, 8)
 
-        let _ = llama_token_to_piece(model, token, result, 8)
-
-        let resultStr = String(cString: result)
-
-        result.deallocate()
-
-        return resultStr
+        if nTokens < 0 {
+            let newResult = UnsafeMutablePointer<Int8>.allocate(capacity: Int(-nTokens))
+            newResult.initialize(repeating: Int8(0), count: Int(-nTokens))
+            defer {
+                newResult.deallocate()
+            }
+            _ = llama_token_to_piece(model, token, newResult, -nTokens)
+            return String(cString: newResult)
+        } else {
+            return String(cString: result)
+        }
     }
 }
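
Note (not part of the patch): both files now follow the same two-pass convention of llama_token_to_piece — call it with a small fixed buffer, and if the return value is negative, its magnitude is the buffer size actually required, so the call is repeated with a buffer of exactly that size. The following is a minimal standalone Swift sketch of that pattern; the helper name piece_for and the assumption that model is an already-loaded llama model pointer (OpaquePointer) are illustrative only, not code from this commit.

    // Minimal sketch, assuming the llama.cpp C API is bridged and `model` was
    // loaded elsewhere (setup not shown here).
    private func piece_for(token: llama_token, model: OpaquePointer) -> String {
        // First attempt with a small fixed-size buffer.
        var buffer = [CChar](repeating: 0, count: 8)
        let n = llama_token_to_piece(model, token, &buffer, Int32(buffer.count))

        if n < 0 {
            // A negative return value reports the required size; retry with exactly that.
            buffer = [CChar](repeating: 0, count: Int(-n))
            let check = llama_token_to_piece(model, token, &buffer, Int32(buffer.count))
            assert(check == -n)
        } else {
            // Success: keep only the bytes that were actually written.
            buffer.removeLast(buffer.count - Int(n))
        }

        // Decode as UTF-8. A single token can carry only part of a multi-byte
        // character, which is why the batched example instead accumulates bytes
        // in an inout buffer before converting to String.
        return String(decoding: buffer.map { UInt8(bitPattern: $0) }, as: UTF8.self)
    }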