nix/packages/llama-cpp/default.nix
Evan Reichard 7080727dce chore: update llama-cpp to b7898 and opencode to v1.1.48
- Update llama-cpp from b7867 to b7898
- Update opencode from v1.1.12 to v1.1.48 with an improved build process:
  - Replace custom bundle script with official script/build.ts
  - Add shell completion support
  - Add version check testing
  - Simplify node_modules handling
- Update llama-swap service config with new llama.cpp options
- Clarify opencode agent testing workflow in developer and reviewer configs
2026-02-03 20:33:14 -05:00


{ pkgs }:
(pkgs.llama-cpp.override {
  cudaSupport = true;
  blasSupport = true;
  rocmSupport = false;
  metalSupport = false;
  vulkanSupport = true;
}).overrideAttrs
  (oldAttrs: rec {
    version = "7898";
    src = pkgs.fetchFromGitHub {
      owner = "ggml-org";
      repo = "llama.cpp";
      tag = "b${version}";
      hash = "sha256-ST7hhE5lWOm46WS+k9lkHJqVQpz8squwHZWE2/XG6MY=";
      leaveDotGit = true;
      # Record the short commit hash, then strip .git so the
      # fetched source stays deterministic.
      postFetch = ''
        git -C "$out" rev-parse --short HEAD > $out/COMMIT
        find "$out" -name .git -print0 | xargs -0 rm -rf
      '';
    };

    # CUDA build flags
    cmakeFlags = (oldAttrs.cmakeFlags or [ ]) ++ [
      "-DGGML_CUDA_ENABLE_UNIFIED_MEMORY=1"
      "-DCMAKE_CUDA_ARCHITECTURES=61;86" # GTX 1070 / GTX 1080 Ti / RTX 3090
    ];

    # Auto CPU optimizations: stop Nix from stripping -march=native
    preConfigure = ''
      export NIX_ENFORCE_NO_NATIVE=0
      ${oldAttrs.preConfigure or ""}
    '';

    # Apply patches
    patchFlags = [ "-p1" ];
    patches = (oldAttrs.patches or [ ]) ++ [
      ./oneof-not-unrecognized-schema.patch
      ./additionalprops-unrecognized-schema.patch
    ];
  })
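
For context, a minimal consumption sketch: importing this file from a NixOS module. The module path and the environment.systemPackages wiring are assumptions for illustration, not part of this repo, and pkgs must be instantiated with allowUnfree = true since the CUDA toolkit is unfree:

{ pkgs, ... }:
{
  # Hypothetical module; the relative path assumes it sits at the repo root.
  environment.systemPackages = [
    (import ./nix/packages/llama-cpp { inherit pkgs; })
  ];
}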