feat(llm): add Qwen3-VL-8B and Qwen2.5-Coder support, update CUDA config

- Add new `qwen3-8b-vision` model with multimodal support using mmproj file
- Add new `qwen2.5-coder-7b-instruct` model with FIM enabled via `--fim-qwen-7b-default`
- Update CUDA device usage from `CUDA0` to `CUDA1` for `olmoe-7b-instruct` and `phi-mini-8b-instruct`
- Upgrade llama.cpp to version 7426 with updated hash and CUDA architectures (61;86)
- Add Copilot acceptance shortcut `<C-J>` in insert mode and disable tab mapping
- Switch KV cache quantization from q4_0 to q8_0 and raise context sizes across the Qwen models
- Remove `qwen2.5-coder-3b-instruct` model
commit b93249daf7 (parent e947e13a02)
2025-12-17 09:27:15 -05:00
3 changed files with 31 additions and 31 deletions
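For context on the new multimodal entry: once the qwen3-8b-vision server is up with its mmproj file, it can be smoke-tested through llama-server's OpenAI-compatible chat endpoint. A minimal sketch, assuming the server answers on localhost:8080 and a local photo.png exists (host, port, and image file are illustrative, not part of this commit):

    # encode a local image and send it alongside a text prompt
    IMG_B64=$(base64 -w0 photo.png)
    curl -s http://localhost:8080/v1/chat/completions \
      -H "Content-Type: application/json" \
      -d '{
        "model": "qwen3-8b-vision",
        "messages": [{
          "role": "user",
          "content": [
            {"type": "text", "text": "Describe this image."},
            {"type": "image_url", "image_url": {"url": "data:image/png;base64,'"$IMG_B64"'"}}
          ]
        }]
      }'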

@@ -25,6 +25,13 @@ local function toggle_llm_fim_provider()
   end
 end
 vim.keymap.set("n", "<leader>cf", toggle_llm_fim_provider, { desc = "Toggle FIM (Llama / Copilot)" })
+
+-- Copilot Accept Shortcut
+vim.keymap.set('i', '<C-J>', 'copilot#Accept("\\<CR>")', {
+  expr = true,
+  replace_keycodes = false
+})
+vim.g.copilot_no_tab_map = true
 -- Configure LLama LLM FIM
 vim.g.llama_config = {
   endpoint = llm_endpoint .. "/infill",

@@ -7,12 +7,12 @@
     vulkanSupport = true;
   }).overrideAttrs
     (oldAttrs: rec {
-      version = "7360";
+      version = "7426";
       src = pkgs.fetchFromGitHub {
         owner = "ggml-org";
         repo = "llama.cpp";
         tag = "b${version}";
-        hash = "sha256-576UL3aZ4AFvxCUP2U4W1gJuAjStRZ6y0uUy/hsdRW0=";
+        hash = "sha256-la+hA+Fw3xFjAyR4XgNmehghGS6zAKh9gHqJnlw2tMQ=";
         leaveDotGit = true;
         postFetch = ''
           git -C "$out" rev-parse --short HEAD > $out/COMMIT
@@ -24,7 +24,7 @@
       cmakeFlags = (oldAttrs.cmakeFlags or [ ]) ++ [
         "-DGGML_NATIVE=ON"
         "-DGGML_CUDA_ENABLE_UNIFIED_MEMORY=1"
-        "-DCMAKE_CUDA_ARCHITECTURES=61" # GTX 1070 / GTX 1080ti
+        "-DCMAKE_CUDA_ARCHITECTURES=61;86" # GTX 1070 / GTX 1080ti / RTX 3090
       ];
       # Disable Nix's march=native Stripping
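A note on the architectures bump above: compute capability 6.1 covers the Pascal cards (GTX 1070 / GTX 1080 Ti) and 8.6 covers Ampere (RTX 3090). On reasonably recent NVIDIA drivers, one way to confirm what a machine actually reports before editing this flag:

    # print each visible GPU's name and compute capability
    nvidia-smi --query-gpu=name,compute_cap --format=csv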

@@ -5,7 +5,6 @@
 }:
 let
   inherit (lib.${namespace}) enabled;
 in
 {
   system.stateVersion = "25.11";
@@ -96,37 +95,49 @@ in
     # --chat-template-kwargs '{\"reasoning_effort\":\"low\"}'
     "gpt-oss-20b-thinking" = {
       name = "GPT OSS (20B) - Thinking";
-      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/gpt-oss-20b-heretic-v2.i1-MXFP4_MOE.gguf -c 131072 --temp 1.0 --top-p 1.0 --top-k 40 -ts 57,43";
+      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/gpt-oss-20b-heretic-v2.i1-MXFP4_MOE.gguf -c 131072 --temp 1.0 --top-p 1.0 --top-k 40 -dev CUDA0";
     };
     # https://huggingface.co/unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF/tree/main
     "qwen3-coder-30b-instruct" = {
       name = "Qwen3 Coder (30B) - Instruct";
-      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/Qwen3-Coder-30B-A3B-Instruct-IQ4_XS.gguf -c 65536 --temp 0.7 --min-p 0.0 --top-p 0.8 --top-k 20 --repeat-penalty 1.05 -ctk q4_0 -ctv q4_0 -np 2 -kvu";
+      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/Qwen3-Coder-30B-A3B-Instruct-Q4_K_M.gguf -c 262144 --temp 0.7 --min-p 0.0 --top-p 0.8 --top-k 20 --repeat-penalty 1.05 --cache-type-k q8_0 --cache-type-v q8_0 -ts 70,30";
     };
     # https://huggingface.co/unsloth/Qwen3-30B-A3B-Instruct-2507-GGUF/tree/main
     "qwen3-30b-2507-instruct" = {
       name = "Qwen3 2507 (30B) - Instruct";
-      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/Qwen3-30B-A3B-Instruct-2507-IQ4_XS.gguf -c 65536 --temp 0.7 --min-p 0.0 --top-p 0.8 --top-k 20 --repeat-penalty 1.05 -ctk q4_0 -ctv q4_0 -np 2 -kvu";
+      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/Qwen3-30B-A3B-Instruct-2507-Q4_K_M.gguf -c 262144 --temp 0.7 --min-p 0.0 --top-p 0.8 --top-k 20 --repeat-penalty 1.05 --cache-type-k q8_0 --cache-type-v q8_0 -ts 70,30";
     };
     # https://huggingface.co/unsloth/Qwen3-30B-A3B-Thinking-2507-GGUF/tree/main
     "qwen3-30b-2507-thinking" = {
       name = "Qwen3 2507 (30B) - Thinking";
-      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/Qwen3-30B-A3B-Thinking-2507-IQ4_XS.gguf -c 65536 --temp 0.7 --min-p 0.0 --top-p 0.8 --top-k 20 --repeat-penalty 1.05 -ctk q4_0 -ctv q4_0 -np 2 -kvu";
+      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/Qwen3-30B-A3B-Thinking-2507-UD-Q4_K_XL.gguf -c 262144 --temp 0.7 --min-p 0.0 --top-p 0.8 --top-k 20 --repeat-penalty 1.05 --cache-type-k q8_0 --cache-type-v q8_0 -ts 70,30";
     };
     # https://huggingface.co/unsloth/Qwen3-Next-80B-A3B-Instruct-GGUF/tree/main
     "qwen3-next-80b-instruct" = {
       name = "Qwen3 Next (80B) - Instruct";
-      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/Qwen3-Next-80B-A3B-Instruct-UD-Q4_K_XL.gguf -c 32768 --temp 0.7 --min-p 0.0 --top-p 0.8 --top-k 20 -dev CUDA0 -ncmoe 39 --mlock";
+      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/Qwen3-Next-80B-A3B-Instruct-UD-Q4_K_XL.gguf --ctx-size 262144 --temp 0.7 --min-p 0.0 --top-p 0.8 --top-k 20 --repeat-penalty 1.05 --cache-type-k q8_0 --cache-type-v q8_0";
     };
     # https://huggingface.co/unsloth/Devstral-Small-2-24B-Instruct-2512-GGUF/tree/main
     "devstral-small-2-instruct" = {
       name = "Devstral Small 2 (24B) - Instruct";
-      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/Devstral-Small-2-24B-Instruct-2512-IQ4_NL.gguf -fa 1 -c 65536 -ctk q4_0 -ctv q4_0";
+      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/Devstral-Small-2-24B-Instruct-2512-UD-Q4_K_XL.gguf -c 98304 -ctk q8_0 -ctv q8_0 -dev CUDA0";
+    };
+    # https://huggingface.co/unsloth/Qwen3-VL-8B-Instruct-GGUF/tree/main
+    "qwen3-8b-vision" = {
+      name = "Qwen3 Vision (8B) - Thinking";
+      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/Qwen3-VL-8B-Instruct-UD-Q4_K_XL.gguf --mmproj /mnt/ssd/Models/Qwen3-VL-8B-Instruct-UD-Q4_K_XL_mmproj-F16.gguf -c 131072 --temp 0.7 --min-p 0.0 --top-p 0.8 --top-k 20 -ctk q8_0 -ctv q8_0 -dev CUDA0";
+    };
+    # https://huggingface.co/unsloth/Qwen2.5-Coder-7B-Instruct-128K-GGUF/tree/main
+    "qwen2.5-coder-7b-instruct" = {
+      name = "Qwen2.5 Coder (7B) - Instruct";
+      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server -m /mnt/ssd/Models/Qwen2.5-Coder-7B-Instruct-Q8_0.gguf --fim-qwen-7b-default -c 131072 --port \${PORT} --dev CUDA0";
     };
     # https://huggingface.co/unsloth/SmolLM3-3B-128K-GGUF/tree/main
@@ -141,34 +152,16 @@ in
       cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/ERNIE-4.5-21B-A3B-PT-UD-Q4_K_XL.gguf -c 98304 --temp 0.7 --min-p 0.0 --top-p 0.8 --top-k 20";
     };
-    # https://huggingface.co/unsloth/Qwen2.5-Coder-7B-Instruct-128K-GGUF/tree/main
-    "qwen2.5-coder-7b-instruct" = {
-      name = "Qwen2.5 Coder (7B) - Instruct";
-      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server -m /mnt/ssd/Models/Qwen2.5-Coder-7B-Instruct-Q8_0.gguf --fim-qwen-7b-default -c 131072 --port \${PORT}";
-    };
-    # https://huggingface.co/unsloth/Qwen2.5-Coder-3B-Instruct-128K-GGUF/tree/main
-    "qwen2.5-coder-3b-instruct" = {
-      name = "Qwen2.5 Coder (3B) - Instruct";
-      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server -m /mnt/ssd/Models/Qwen2.5-Coder-3B-Instruct-Q4_K_M.gguf --fim-qwen-3b-default -c 20000 -ts 60,40 --port \${PORT}";
-    };
-    # https://huggingface.co/unsloth/Qwen3-VL-8B-Instruct-GGUF/tree/main
-    "qwen3-8b-vision" = {
-      name = "Qwen3 Vision (8B) - Thinking";
-      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/Qwen3-VL-8B-Instruct-UD-Q4_K_XL.gguf --mmproj /mnt/ssd/Models/Qwen3-VL-8B-Instruct-UD-Q4_K_XL_mmproj-F16.gguf -c 131072 --temp 0.7 --min-p 0.0 --top-p 0.8 --top-k 20 -ctk q4_0 -ctk q4_0";
-    };
     # https://huggingface.co/mradermacher/OLMoE-1B-7B-0125-Instruct-GGUF/tree/main
     "olmoe-7b-instruct" = {
       name = "OLMoE (7B) - Instruct";
-      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/OLMoE-1B-7B-0125-Instruct.Q8_0.gguf -dev CUDA0";
+      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/OLMoE-1B-7B-0125-Instruct.Q8_0.gguf -dev CUDA1";
     };
     # https://huggingface.co/gabriellarson/Phi-mini-MoE-instruct-GGUF/tree/main
     "phi-mini-8b-instruct" = {
       name = "Phi mini (8B) - Instruct";
-      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/Phi-mini-MoE-instruct-Q8_0.gguf --repeat-penalty 1.05 --temp 0.0 --top-p 1.0 --top-k 1 -dev CUDA0";
+      cmd = "${pkgs.reichard.llama-cpp}/bin/llama-server --port \${PORT} -m /mnt/ssd/Models/Phi-mini-MoE-instruct-Q8_0.gguf --repeat-penalty 1.05 --temp 0.0 --top-p 1.0 --top-k 1 -dev CUDA1";
     };
   };
   groups = {
@@ -190,6 +183,6 @@ in
     git
     tmux
     vim
-    llama-cpp
+    reichard.llama-cpp
   ];
 }
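Finally, the re-added qwen2.5-coder-7b-instruct entry is what the Neovim llama_config's /infill endpoint in the first file talks to. A minimal sketch of a raw fill-in-the-middle request against it, assuming the server is reachable on localhost:8080 (host and port are illustrative, not part of this commit):

    # ask the server to complete the gap between a prefix and a suffix
    curl -s http://localhost:8080/infill \
      -H "Content-Type: application/json" \
      -d '{
        "input_prefix": "def add(a, b):\n    return ",
        "input_suffix": "\n\nprint(add(1, 2))\n",
        "n_predict": 32
      }'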