From f305bad11e10ad09e396faed2e37f4f845f5d566 Mon Sep 17 00:00:00 2001
From: Volodymyr Vitvitskyi <72226+signalpillar@users.noreply.github.com>
Date: Sat, 26 Aug 2023 14:25:39 +0100
Subject: [PATCH] flake : build llama.cpp on Intel with nix (#2795)

Problem
-------

`nix build` on Intel macOS fails with a missing `Accelerate.h`.

Changes
-------

- Fix the llama.cpp nix build for Intel: add the same SDK frameworks as for ARM
- Add the `quantize` app to the outputs of the nix flake
- Extend the nix devShell with llama-python so we can use convertScript

Testing
-------

Testing the steps with nix:

1. `nix build`
2. Get the model, then `nix develop` and `python convert.py models/llama-2-7b.ggmlv3.q4_0.bin`
3. `nix run llama.cpp#quantize -- open_llama_7b/ggml-model-f16.gguf ./models/ggml-model-q4_0.bin 2`
4. `nix run llama.cpp#llama -- -m models/ggml-model-q4_0.bin -p "What is nix?" -n 400 --temp 0.8 -e -t 8`

Co-authored-by: Volodymyr Vitvitskyi
---
 flake.nix | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/flake.nix b/flake.nix
index 616b90252..d454cedc3 100644
--- a/flake.nix
+++ b/flake.nix
@@ -21,6 +21,12 @@
               CoreGraphics
               CoreVideo
             ]
+          else if isDarwin then
+            with pkgs.darwin.apple_sdk.frameworks; [
+              Accelerate
+              CoreGraphics
+              CoreVideo
+            ]
           else
             with pkgs; [ openblas ]
         );
@@ -80,8 +86,13 @@
           type = "app";
           program = "${self.packages.${system}.default}/bin/llama";
         };
+        apps.quantize = {
+          type = "app";
+          program = "${self.packages.${system}.default}/bin/quantize";
+        };
         apps.default = self.apps.${system}.llama;
         devShells.default = pkgs.mkShell {
+          buildInputs = [ llama-python ];
           packages = nativeBuildInputs ++ osSpecific;
         };
       });
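
For context, here is a minimal sketch of the framework-selection logic after this patch, written as a standalone Nix function of `pkgs`. It is an illustration only, not the literal flake.nix: the `isAarch64` condition and the exact contents of the ARM branch are assumptions about surrounding code that the hunk does not show.

```nix
# Sketch (assumed context): platform-conditional dependencies after the patch.
{ pkgs }:
let
  inherit (pkgs.stdenv) isAarch64 isDarwin;
in
if isAarch64 && isDarwin then
  # ARM macOS branch: contents assumed, only its tail appears in the hunk context.
  with pkgs.darwin.apple_sdk.frameworks; [
    Accelerate
    CoreGraphics
    CoreVideo
  ]
else if isDarwin then
  # Branch added by this patch: Intel macOS gets the same SDK frameworks,
  # which provide Accelerate.h and fix the `nix build` failure.
  with pkgs.darwin.apple_sdk.frameworks; [
    Accelerate
    CoreGraphics
    CoreVideo
  ]
else
  # Non-Darwin platforms keep linking against OpenBLAS.
  with pkgs; [ openblas ]
```

Because every branch evaluates to a plain list, the caller can concatenate the result with its other build inputs without caring which platform was selected, which is why the fix stays purely additive.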