- Bump llama-cpp from version 8157 to 8196
- Bump llama-swap from version 192 to 197
- Switch default assistant model from qwen3-coder-next-80b to qwen3.5-27b-thinking
- Remove glm-4-32b-instruct model configuration
- Update qwen3.5-27b-thinking config (see the sketch after this list):
  - Use bartowski quantization (IQ4_XS) instead of unsloth
  - Increase context window from 131k to 196k
  - Add cache type settings (q8_0) and CUDA device
- Add 1password-cli to home-manager programs
- Fix typo: 'dispay' -> 'display' in llm-config.lua
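A minimal sketch of how the qwen3.5-27b-thinking changes above might look as a llama-swap model entry expressed as a Nix attrset. The attribute layout, model path, filename, and the exact token count behind "196k" are assumptions for illustration; the actual config file is not shown in this commit.

{
  models.qwen3_5-27b-thinking = {
    # llama-server flags mirroring the commit message; the model path
    # and GGUF filename are hypothetical.
    cmd = ''
      llama-server
        --model /models/bartowski/Qwen3.5-27B-Thinking-IQ4_XS.gguf
        --ctx-size 196608      # "196k" per the commit; exact value assumed
        --cache-type-k q8_0    # q8_0 quantization for the KV cache
        --cache-type-v q8_0
        --device CUDA0         # pin inference to the CUDA device
    '';
  };
}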
{ pkgs }:

(pkgs.llama-cpp.override {
  cudaSupport = true;
  blasSupport = true;
  rocmSupport = false;
  metalSupport = false;
  vulkanSupport = true;
}).overrideAttrs
  (oldAttrs: rec {
    version = "8196";

    src = pkgs.fetchFromGitHub {
      owner = "ggml-org";
      repo = "llama.cpp";
      tag = "b${version}";
      hash = "sha256-GZRHiyT8mvhV5RTczDRnCSh31UxRZ3F8tEBC1l8oFNQ=";
      # Keep .git long enough to record the short commit hash into
      # $out/COMMIT, then strip it so the fixed-output hash stays stable.
      leaveDotGit = true;
      postFetch = ''
        git -C "$out" rev-parse --short HEAD > $out/COMMIT
        find "$out" -name .git -print0 | xargs -0 rm -rf
      '';
    };

    # CUDA build flags: unified memory lets oversized models spill into
    # system RAM; compute capabilities 6.1 and 8.6 cover the target GPUs.
    cmakeFlags = (oldAttrs.cmakeFlags or [ ]) ++ [
      "-DGGML_CUDA_ENABLE_UNIFIED_MEMORY=1"
      "-DCMAKE_CUDA_ARCHITECTURES=61;86" # GTX 1070 / GTX 1080 Ti / RTX 3090
    ];

    # Auto CPU optimizations: disable Nix's stripping of -march=native
    # so GGML can use the host CPU's SIMD extensions.
    preConfigure = ''
      export NIX_ENFORCE_NO_NATIVE=0
      ${oldAttrs.preConfigure or ""}
    '';
  })
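A minimal usage sketch, assuming the expression above is saved as llama-cpp.nix next to the importing module (the file name and module shape are assumptions, not part of this commit):

{ pkgs, ... }:
{
  # Install the CUDA-enabled llama-cpp build defined above.
  environment.systemPackages = [
    (import ./llama-cpp.nix { inherit pkgs; })
  ];
}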