Add qwen3.6-27b-mtp-thinking model config with 150K context, MTP speculative decoding, and thinking mode support. Bump llama-cpp from b9009 to b9045 and apply MTP patch from upstream PR #22673.
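The model config file itself isn't part of the listing below; as a rough sketch of the shape it might take (every attribute name here is illustrative, not from this repo, and the MTP/thinking options ultimately depend on the unmerged upstream PR):

# Hypothetical model-config sketch; attribute names are illustrative.
{
  model = "qwen3.6-27b-mtp-thinking";
  contextLength = 153600;   # "150K" assumed to mean 150 * 1024 tokens
  speculative = "mtp";      # multi-token prediction via the PR #22673 patch
  thinking = true;          # expose the model's thinking/reasoning mode
}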
{ pkgs }:
(pkgs.llama-cpp.override {
  cudaSupport = true;
  blasSupport = true;
  rocmSupport = false;
  metalSupport = false;
  vulkanSupport = true;
}).overrideAttrs
  (oldAttrs: rec {
    version = "9045";
    src = pkgs.fetchFromGitHub {
      owner = "ggml-org";
      repo = "llama.cpp";
      tag = "b${version}";
      hash = "sha256-fdHGxJaMx/VG7twXdWvHdkThAOSFJTbjAnpRxsNx5l0=";
      # Keep .git just long enough to record the short commit hash,
      # then delete it so the fixed-output hash stays deterministic.
      leaveDotGit = true;
      postFetch = ''
        git -C "$out" rev-parse --short HEAD > $out/COMMIT
        find "$out" -name .git -print0 | xargs -0 rm -rf
      '';
    };

    # Add SPIR-V headers for the Vulkan backend: newer llama.cpp
    # requires spirv/unified1/spirv.hpp, which vulkan-headers alone
    # doesn't provide.
    buildInputs = (oldAttrs.buildInputs or [ ]) ++ [ pkgs.spirv-headers ];

    # CUDA build flags
    cmakeFlags = (oldAttrs.cmakeFlags or [ ]) ++ [
      "-DGGML_CUDA_ENABLE_UNIFIED_MEMORY=1"
      "-DCMAKE_CUDA_ARCHITECTURES=61;86" # GTX 1070 / GTX 1080 Ti (sm_61), RTX 3090 (sm_86)
    ];

    # Auto CPU optimizations: Nix normally strips -march=native for
    # reproducibility; NIX_ENFORCE_NO_NATIVE=0 disables that stripping.
    preConfigure = ''
      export NIX_ENFORCE_NO_NATIVE=0
      ${oldAttrs.preConfigure or ""}
    '';

    # Apply the MTP (multi-token prediction) patch from upstream PR #22673.
    patchFlags = [ "-p1" ];
    patches = (oldAttrs.patches or [ ]) ++ [
      (pkgs.fetchpatch {
        name = "mtp.patch";
        # NOTE: GitHub regenerates PR .patch files, so this hash can
        # break if the PR is rebased or force-pushed.
        url = "https://github.com/ggml-org/llama.cpp/pull/22673.patch";
        hash = "sha256-jM4X+jy7JhOAn2v/U9mmWM/507DKaW8d/dhR78HZWFQ=";
      })
    ];
  })
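To consume the package, e.g. in an ad-hoc shell (a hypothetical usage sketch; the file name llama-cpp.nix is an assumption):

# Hypothetical usage; assumes the expression above is saved as llama-cpp.nix.
let
  pkgs = import <nixpkgs> { config.allowUnfree = true; }; # CUDA toolkit is unfree
  llama-cpp-mtp = import ./llama-cpp.nix { inherit pkgs; };
in
pkgs.mkShell { packages = [ llama-cpp-mtp ]; }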