diff --git a/packages/llama-cpp/default.nix b/packages/llama-cpp/default.nix
index adfd2b1..d6a1404 100644
--- a/packages/llama-cpp/default.nix
+++ b/packages/llama-cpp/default.nix
@@ -7,12 +7,12 @@
   vulkanSupport = true;
 }).overrideAttrs (oldAttrs: rec {
-  version = "8680";
+  version = "8815";
   src = pkgs.fetchFromGitHub {
     owner = "ggml-org";
     repo = "llama.cpp";
     tag = "b${version}";
-    hash = "sha256-tJCA19BQs0vZc0VjPnbIrh3CJFxyPL6Ne4oIG4gfozw=";
+    hash = "sha256-QJsGBHLdvFfMXZJSk9D76b7v6DP06NaTYztHv41o/CA=";
     leaveDotGit = true;
     postFetch = ''
       git -C "$out" rev-parse --short HEAD > $out/COMMIT
 
@@ -20,6 +20,11 @@
     '';
   };
 
+  # Add SPIR-V Headers for Vulkan Backend
+  # Newer llama.cpp requires spirv/unified1/spirv.hpp which isn't
+  # pulled in by vulkan-headers alone.
+  buildInputs = (oldAttrs.buildInputs or [ ]) ++ [ pkgs.spirv-headers ];
+
   # Auto CPU Optimizations
   cmakeFlags = (oldAttrs.cmakeFlags or [ ]) ++ [
     "-DGGML_CUDA_ENABLE_UNIFIED_MEMORY=1"