coreml : fix ANE optimized encoder (#1716)

pull/1728/head
Yajing Tang 2024-01-04 06:28:30 -08:00 committed by GitHub
parent ab0a8593c5
commit ba5bcde874
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 4 additions and 17 deletions

View File

@@ -24,9 +24,9 @@ struct whisper_coreml_context * whisper_coreml_init(const char * path_model) {
// select which device to run the Core ML model on
MLModelConfiguration *config = [[MLModelConfiguration alloc] init];
config.computeUnits = MLComputeUnitsCPUAndGPU;
// config.computeUnits = MLComputeUnitsCPUAndGPU;
//config.computeUnits = MLComputeUnitsCPUAndNeuralEngine;
//config.computeUnits = MLComputeUnitsAll;
config.computeUnits = MLComputeUnitsAll;
const void * data = CFBridgingRetain([[whisper_encoder_impl alloc] initWithContentsOfURL:url_model configuration:config error:nil]);

View File

@@ -143,20 +143,7 @@ class AudioEncoderANE(AudioEncoder):
x = block(x)
x = self.ln_post(x)
# """
# TODO:
# I think we need to transpose the result here to make it fit whisper.cpp memory order.
# However, even doing this, the results are still wrong. Kind of less wrong compared to
# not transposing, but still wrong.
# Also, I don't know why the original OpenAI implementation does not need to transpose
# transpose to (batch_size, n_ctx, n_state)
# x : torch.Tensor, shape = (batch_size, n_state, 1, n_ctx)
# """
# x = x.transpose(1,3)
x = x.squeeze(2).transpose(1, 2)
return x

View File

@@ -23,7 +23,7 @@ if [[ $mname == "-h5" ]]; then
echo $mpath
python3 models/convert-h5-to-coreml.py --model-name $mname --model-path $mpath --encoder-only True
else
python3 models/convert-whisper-to-coreml.py --model $mname --encoder-only True
python3 models/convert-whisper-to-coreml.py --model $mname --encoder-only True --optimize-ane True
fi
xcrun coremlc compile models/coreml-encoder-${mname}.mlpackage models/