Compare commits

...

2 Commits

Author SHA1 Message Date
90eed0378e chore(llama): update llama-cpp and remove deprecated models
- Upgrade llama-cpp from version 8196 to 8229
- Remove GPT OSS CSEC (20B) - Thinking model config
- Remove Qwen3 Next (80B) - Instruct model config
- Increment -ncmoe parameter for Qwen3 Coder Next (80B) model
2026-03-07 08:55:00 -05:00
36873c324b refactor(hyprland): adjust brightness step size and format code
- Change brightness key bindings from 10% to 4%/5% for finer control
- Reformat hyprland module to use one parameter per line
2026-03-07 08:25:00 -05:00
3 changed files with 11 additions and 55 deletions

View File

@@ -1,8 +1,9 @@
-{ lib
-, pkgs
-, config
-, namespace
-, ...
+{
+  lib,
+  pkgs,
+  config,
+  namespace,
+  ...
 }:
 let
 inherit (lib) types mkIf;
@@ -92,8 +93,8 @@ in
 ",XF86AudioLowerVolume, exec, wpctl set-volume @DEFAULT_AUDIO_SINK@ 5%-"
 ",XF86AudioMute, exec, wpctl set-mute @DEFAULT_AUDIO_SINK@ toggle"
 ",XF86AudioMicMute, exec, wpctl set-mute @DEFAULT_AUDIO_SOURCE@ toggle"
-",XF86MonBrightnessUp, exec, brightnessctl s 10%+"
-",XF86MonBrightnessDown, exec, brightnessctl s 10%-"
+",XF86MonBrightnessUp, exec, brightnessctl s 4%+"
+",XF86MonBrightnessDown, exec, brightnessctl s 5%-"
 # macOS Keyboard Brightness
 "$menuMod, XF86MonBrightnessUp, exec, brightnessctl -d kbd_backlight s 10%+"

View File

@@ -73,51 +73,6 @@ in
 };
 };
# https://huggingface.co/mradermacher/GPT-OSS-Cybersecurity-20B-Merged-i1-GGUF/tree/main
"gpt-oss-csec-20b-thinking" = {
name = "GPT OSS CSEC (20B) - Thinking";
macros.ctx = "131072";
cmd = ''
${llama-cpp}/bin/llama-server \
--port ''${PORT} \
-m /mnt/ssd/Models/GPT-OSS/GPT-OSS-Cybersecurity-20B-Merged.i1-MXFP4_MOE.gguf \
-c ''${ctx} \
--temp 1.0 \
--top-p 1.0 \
--top-k 40 \
-dev CUDA0
'';
metadata = {
type = [ "text-generation" ];
};
};
# https://huggingface.co/unsloth/Qwen3-Next-80B-A3B-Instruct-GGUF/tree/main
"qwen3-next-80b-instruct" = {
name = "Qwen3 Next (80B) - Instruct";
macros.ctx = "262144";
cmd = ''
${llama-cpp}/bin/llama-server \
--port ''${PORT} \
-m /mnt/ssd/Models/Qwen3/Qwen3-Next-80B-A3B-Instruct-UD-Q2_K_XL.gguf \
-c ''${ctx} \
--temp 0.7 \
--min-p 0.0 \
--top-p 0.8 \
--top-k 20 \
--repeat-penalty 1.05 \
-ctk q8_0 \
-ctv q8_0 \
-fit off
'';
metadata = {
type = [
"text-generation"
"coding"
];
};
};
 # https://huggingface.co/unsloth/Qwen3-Coder-Next-GGUF/tree/main
 "qwen3-coder-next-80b-instruct" = {
 name = "Qwen3 Coder Next (80B) - Instruct";
@@ -132,7 +87,7 @@ in
 --min-p 0.01 \
 --top-k 40 \
 -fit off \
--ncmoe 18 \
+-ncmoe 19 \
 -ts 78,22
 '';

View File

@@ -7,12 +7,12 @@
 vulkanSupport = true;
 }).overrideAttrs
 (oldAttrs: rec {
-version = "8196";
+version = "8229";
 src = pkgs.fetchFromGitHub {
 owner = "ggml-org";
 repo = "llama.cpp";
 tag = "b${version}";
-hash = "sha256-GZRHiyT8mvhV5RTczDRnCSh31UxRZ3F8tEBC1l8oFNQ=";
+hash = "sha256-SmCNsQfLQMmwa8PzFPaQb9yBdUZTxM8xxSqhumVGvHM=";
 leaveDotGit = true;
 postFetch = ''
 git -C "$out" rev-parse --short HEAD > $out/COMMIT