chore(llama-swap): bump llama-cpp to b9048 and swap in UD-Q4/Q6 MTP configs
Replace qwen3.6-27b-thinking and qwen3.6-27b-mtp-thinking with qwen3.6-27b-udq4-thinking (single GPU) and qwen3.6-27b-udq6-thinking (dual GPU). Update aliases and concurrent set accordingly.
This commit is contained in:
@@ -57,54 +57,14 @@ in
|
|||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
# https://huggingface.co/unsloth/Qwen3.6-27B-GGUF/tree/main
|
# https://huggingface.co/unsloth/Qwen3.6-27B-GGUF-MTP/tree/main
|
||||||
"qwen3.6-27b-thinking" = {
|
"qwen3.6-27b-udq4-thinking" = {
|
||||||
name = "Qwen3.6 (27B) - Thinking";
|
name = "Qwen3.6 (27B) - Thinking (UD-Q4)";
|
||||||
macros.ctx = "196608";
|
|
||||||
cmd = ''
|
|
||||||
${llama-cpp}/bin/llama-server \
|
|
||||||
--port ''${PORT} \
|
|
||||||
-m /mnt/ssd/Models/Qwen3.6/Qwen3.6-27B-IQ4_XS.gguf \
|
|
||||||
-c ''${ctx} \
|
|
||||||
--parallel 2 \
|
|
||||||
--temp 0.6 \
|
|
||||||
--top-p 0.95 \
|
|
||||||
--top-k 20 \
|
|
||||||
--min-p 0.00 \
|
|
||||||
--presence-penalty 1.5 \
|
|
||||||
-ctk q8_0 \
|
|
||||||
-ctv q8_0 \
|
|
||||||
--keep 3000 \
|
|
||||||
--batch-size 4096 \
|
|
||||||
--ubatch-size 1024 \
|
|
||||||
--spec-type ngram-mod \
|
|
||||||
--spec-ngram-mod-n-match 24 \
|
|
||||||
--spec-draft-n-min 16 \
|
|
||||||
--spec-draft-n-max 64 \
|
|
||||||
-dev CUDA0 \
|
|
||||||
-fit off \
|
|
||||||
--chat-template-kwargs "{\"preserve_thinking\": true}"
|
|
||||||
'';
|
|
||||||
# --chat-template-kwargs "{\"enable_thinking\": false}"
|
|
||||||
# --spec-draft-n-min 16 \
|
|
||||||
# --spec-draft-n-max 32 \
|
|
||||||
metadata = {
|
|
||||||
type = [
|
|
||||||
"text-generation"
|
|
||||||
"coding"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
# https://huggingface.co/localweights/Qwen3.6-27B-MTP-IQ4_XS-GGUF/tree/main
|
|
||||||
"qwen3.6-27b-mtp-thinking" = {
|
|
||||||
name = "Qwen3.6 (27B) - Thinking (MTP)";
|
|
||||||
macros.ctx = "150000";
|
macros.ctx = "150000";
|
||||||
#
|
|
||||||
cmd = ''
|
cmd = ''
|
||||||
${llama-cpp}/bin/llama-server \
|
${llama-cpp}/bin/llama-server \
|
||||||
--port ''${PORT} \
|
--port ''${PORT} \
|
||||||
-m /mnt/ssd/Models/Qwen3.6/Qwen3.6-27B-MTP-IQ4_XS.gguf \
|
-m /mnt/ssd/Models/Qwen3.6/Qwen3.6-27B-UD-Q4_K_XL.gguf \
|
||||||
-c ''${ctx} \
|
-c ''${ctx} \
|
||||||
--parallel 1 \
|
--parallel 1 \
|
||||||
--temp 0.6 \
|
--temp 0.6 \
|
||||||
@@ -115,7 +75,7 @@ in
|
|||||||
-ctk q8_0 \
|
-ctk q8_0 \
|
||||||
-ctv q8_0 \
|
-ctv q8_0 \
|
||||||
--spec-type mtp \
|
--spec-type mtp \
|
||||||
--spec-draft-n-max 5 \
|
--spec-draft-n-max 3 \
|
||||||
-dev CUDA0 \
|
-dev CUDA0 \
|
||||||
-fit off \
|
-fit off \
|
||||||
--chat-template-kwargs "{\"preserve_thinking\": true}"
|
--chat-template-kwargs "{\"preserve_thinking\": true}"
|
||||||
@@ -132,7 +92,6 @@ in
|
|||||||
"gemma-4-26b-vision" = {
|
"gemma-4-26b-vision" = {
|
||||||
name = "Gemma 4 (26B) - Vision";
|
name = "Gemma 4 (26B) - Vision";
|
||||||
macros.ctx = "196608";
|
macros.ctx = "196608";
|
||||||
# 262144
|
|
||||||
cmd = ''
|
cmd = ''
|
||||||
${llama-cpp}/bin/llama-server \
|
${llama-cpp}/bin/llama-server \
|
||||||
--port ''${PORT} \
|
--port ''${PORT} \
|
||||||
@@ -152,7 +111,6 @@ in
|
|||||||
-fit off \
|
-fit off \
|
||||||
-dev CUDA0
|
-dev CUDA0
|
||||||
'';
|
'';
|
||||||
# --no-mmproj-offload \
|
|
||||||
metadata = {
|
metadata = {
|
||||||
type = [
|
type = [
|
||||||
"text-generation"
|
"text-generation"
|
||||||
@@ -648,6 +606,38 @@ in
|
|||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
# https://huggingface.co/unsloth/Qwen3.6-27B-GGUF-MTP/tree/main
|
||||||
|
"qwen3.6-27b-udq6-thinking" = {
|
||||||
|
name = "Qwen3.6 (27B) - Thinking (UD-Q6)";
|
||||||
|
macros.ctx = "225000";
|
||||||
|
cmd = ''
|
||||||
|
${llama-cpp}/bin/llama-server \
|
||||||
|
--port ''${PORT} \
|
||||||
|
-m /mnt/ssd/Models/Qwen3.6/Qwen3.6-27B-UD-Q6_K_XL.gguf \
|
||||||
|
-c ''${ctx} \
|
||||||
|
--parallel 1 \
|
||||||
|
--temp 0.6 \
|
||||||
|
--top-p 0.95 \
|
||||||
|
--top-k 20 \
|
||||||
|
--min-p 0.00 \
|
||||||
|
--presence-penalty 0.0 \
|
||||||
|
-ctk q8_0 \
|
||||||
|
-ctv q8_0 \
|
||||||
|
--spec-type mtp \
|
||||||
|
--spec-draft-n-max 3 \
|
||||||
|
-dev CUDA0,CUDA1 \
|
||||||
|
-ts 75,25 \
|
||||||
|
-fit off \
|
||||||
|
--chat-template-kwargs "{\"preserve_thinking\": true}"
|
||||||
|
'';
|
||||||
|
metadata = {
|
||||||
|
type = [
|
||||||
|
"text-generation"
|
||||||
|
"coding"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
# ---------------------------------------
|
# ---------------------------------------
|
||||||
# ---------- Stable Diffusion ----------
|
# ---------- Stable Diffusion ----------
|
||||||
# ---------------------------------------
|
# ---------------------------------------
|
||||||
@@ -752,8 +742,7 @@ in
|
|||||||
go = "gpt-oss-20b-thinking";
|
go = "gpt-oss-20b-thinking";
|
||||||
g4 = "gemma-4-26b-vision";
|
g4 = "gemma-4-26b-vision";
|
||||||
q36a = "qwen3.6-35b-thinking";
|
q36a = "qwen3.6-35b-thinking";
|
||||||
q36b = "qwen3.6-27b-thinking";
|
q36b = "qwen3.6-27b-udq4-thinking";
|
||||||
q36bmtp = "qwen3.6-27b-mtp-thinking";
|
|
||||||
zi = "z-image-turbo";
|
zi = "z-image-turbo";
|
||||||
qie = "qwen-image-edit-2511";
|
qie = "qwen-image-edit-2511";
|
||||||
qi = "qwen-image-2512";
|
qi = "qwen-image-2512";
|
||||||
@@ -766,7 +755,7 @@ in
|
|||||||
};
|
};
|
||||||
|
|
||||||
sets = {
|
sets = {
|
||||||
concurrent = "(go | g4 | q36a | q36b | q36bmtp | vlt | vtt | vlv | zi | qie | qi | cr) & (qv | q4 | q9)";
|
concurrent = "(go | g4 | q36a | q36b | vlt | vtt | vlv | zi | qie | qi | cr) & (qv | q4 | q9)";
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,12 +7,12 @@
|
|||||||
vulkanSupport = true;
|
vulkanSupport = true;
|
||||||
}).overrideAttrs
|
}).overrideAttrs
|
||||||
(oldAttrs: rec {
|
(oldAttrs: rec {
|
||||||
version = "9045";
|
version = "9048";
|
||||||
src = pkgs.fetchFromGitHub {
|
src = pkgs.fetchFromGitHub {
|
||||||
owner = "ggml-org";
|
owner = "ggml-org";
|
||||||
repo = "llama.cpp";
|
repo = "llama.cpp";
|
||||||
tag = "b${version}";
|
tag = "b${version}";
|
||||||
hash = "sha256-fdHGxJaMx/VG7twXdWvHdkThAOSFJTbjAnpRxsNx5l0=";
|
hash = "sha256-lYtX0hLReCnFw1+xOKefly+WunuoN89ZFEFl5mK5pQ4=";
|
||||||
leaveDotGit = true;
|
leaveDotGit = true;
|
||||||
postFetch = ''
|
postFetch = ''
|
||||||
git -C "$out" rev-parse --short HEAD > $out/COMMIT
|
git -C "$out" rev-parse --short HEAD > $out/COMMIT
|
||||||
@@ -43,7 +43,7 @@
|
|||||||
(pkgs.fetchpatch {
|
(pkgs.fetchpatch {
|
||||||
name = "mtp.patch";
|
name = "mtp.patch";
|
||||||
url = "https://github.com/ggml-org/llama.cpp/pull/22673.patch";
|
url = "https://github.com/ggml-org/llama.cpp/pull/22673.patch";
|
||||||
hash = "sha256-jM4X+jy7JhOAn2v/U9mmWM/507DKaW8d/dhR78HZWFQ=";
|
hash = "sha256-HqpchhOpxuw5mY4a/OCWGDr2Y32rC4FeOHuhaVt+mvY=";
|
||||||
})
|
})
|
||||||
];
|
];
|
||||||
})
|
})
|
||||||
|
|||||||
Reference in New Issue
Block a user