feat(llm): add Qwen3-VL-8B and Qwen2.5-Coder support, update CUDA config

- Add new `qwen3-8b-vision` model with multimodal support using an mmproj file (sketched after this list)
- Add new `qwen2.5-coder-7b-instruct` model with FIM enabled via `--fim-qwen-7b-default`
- Update CUDA device usage from `CUDA0` to `CUDA1` for `olmoe-7b-instruct` and `phi-mini-8b-instruct`
- Upgrade llama.cpp to version 7426 with updated hash and CUDA architectures (61;86)
- Add Copilot acceptance shortcut `<C-J>` in insert mode and disable the Tab mapping (see the Neovim sketch below)
- Improve cache type settings across multiple models for better performance
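
As a rough illustration of the model bullets above, here is a minimal Nix sketch of what the new entries could look like. The attrset shape and model paths are placeholders, not this repo's real schema; the flags themselves (`--mmproj`, `--device`, `--fim-qwen-7b-default`) are real llama-server options.

```nix
# A minimal sketch only -- attribute layout and file paths are hypothetical.
{
  # Vision models need the multimodal projector file alongside the weights.
  "qwen3-8b-vision".cmd =
    "llama-server --model /models/Qwen3-VL-8B.gguf"
    + " --mmproj /models/mmproj-Qwen3-VL-8B.gguf";

  # Preset flag that configures Qwen 2.5 Coder 7B for fill-in-the-middle.
  "qwen2.5-coder-7b-instruct".cmd = "llama-server --fim-qwen-7b-default";

  # Per the commit message, this model now runs on the second GPU.
  "olmoe-7b-instruct".cmd =
    "llama-server --device CUDA1 --model /models/olmoe-7b.gguf";
}
```

For the Copilot bullet, the mapping documented in copilot.vim's README looks like the following; wrapping it in `programs.neovim.extraConfig` is an assumption about how this config is organized.

```nix
# The home-manager attribute path is assumed; the vimscript body is the
# stock copilot.vim recipe for moving acceptance off <Tab>.
programs.neovim.extraConfig = ''
  " Accept the Copilot suggestion with Ctrl-J in insert mode
  imap <silent><script><expr> <C-J> copilot#Accept("\<CR>")
  " Stop copilot.vim from claiming <Tab>
  let g:copilot_no_tab_map = v:true
'';
```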
commit b93249daf7
parent e947e13a02
2025-12-17 09:27:15 -05:00
3 changed files with 31 additions and 31 deletions


@@ -7,12 +7,12 @@
   vulkanSupport = true;
 }).overrideAttrs
   (oldAttrs: rec {
-    version = "7360";
+    version = "7426";
     src = pkgs.fetchFromGitHub {
       owner = "ggml-org";
       repo = "llama.cpp";
       tag = "b${version}";
-      hash = "sha256-576UL3aZ4AFvxCUP2U4W1gJuAjStRZ6y0uUy/hsdRW0=";
+      hash = "sha256-la+hA+Fw3xFjAyR4XgNmehghGS6zAKh9gHqJnlw2tMQ=";
       leaveDotGit = true;
       postFetch = ''
         git -C "$out" rev-parse --short HEAD > $out/COMMIT
@@ -24,7 +24,7 @@
   cmakeFlags = (oldAttrs.cmakeFlags or [ ]) ++ [
     "-DGGML_NATIVE=ON"
     "-DGGML_CUDA_ENABLE_UNIFIED_MEMORY=1"
-    "-DCMAKE_CUDA_ARCHITECTURES=61" # GTX 1070 / GTX 1080ti
+    "-DCMAKE_CUDA_ARCHITECTURES=61;86" # GTX 1070 / GTX 1080ti / RTX 3090
   ];
   # Disable Nix's march=native Stripping
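
The architecture bump maps directly to CUDA compute capabilities: 61 is Pascal's 6.1 (GTX 1070, GTX 1080 Ti) and 86 is Ampere's 8.6 (RTX 3090), presumably the card behind the `CUDA1` device changes in the commit message. An annotated copy of the resulting flag list, for reference:

```nix
# Annotated copy of the diff's flag list; the mappings in the comments
# are facts about NVIDIA compute capabilities, not new configuration.
cmakeFlags = (oldAttrs.cmakeFlags or [ ]) ++ [
  "-DGGML_NATIVE=ON"
  "-DGGML_CUDA_ENABLE_UNIFIED_MEMORY=1"
  # 61 -> compute capability 6.1 (Pascal: GTX 1070, GTX 1080 Ti)
  # 86 -> compute capability 8.6 (Ampere: RTX 3090)
  "-DCMAKE_CUDA_ARCHITECTURES=61;86"
];
```

On the `version` bump in the first hunk: a common Nix workflow is to set `hash = pkgs.lib.fakeHash`, rebuild, and copy the correct value from the resulting hash-mismatch error; this commit's new hash was presumably obtained that way.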