chore(cleanup): sops, opencode, etc
@@ -1,31 +1,39 @@
-{ config, lib, namespace, ... }:
+{ config
+, lib
+, namespace
+, ...
+}:
 let
+  inherit (lib) mkIf mkEnableOption types;
   inherit (lib.${namespace}) mkOpt;
+  getFile = lib.snowfall.fs.get-file;
+
+  user = config.users.users.${config.${namespace}.user.name};
   cfg = config.${namespace}.security.sops;
 in
 {
-  options.${namespace}.security.sops = {
-    enable = lib.mkEnableOption "sops";
-    defaultSopsFile = mkOpt lib.types.path null "Default sops file.";
-    sshKeyPaths = mkOpt (with lib.types; listOf path) [
-      # "/etc/ssh/ssh_host_ed25519_key"
-    ] "SSH Key paths to use.";
+  options.${namespace}.security.sops = with types; {
+    enable = mkEnableOption "Enable sops";
+    defaultSopsFile = mkOpt str "secrets/systems/${config.system.name}.yaml" "Default sops file.";
+    sshKeyPaths = mkOpt (listOf path) [ ] "Additional SSH key paths to use.";
   };

-  config = lib.mkIf cfg.enable {
+  config = mkIf cfg.enable {
     sops = {
-      inherit (cfg) defaultSopsFile;
+      defaultSopsFile = getFile cfg.defaultSopsFile;

       age = {
-        inherit (cfg) sshKeyPaths;
-
-        keyFile = "${config.users.users.${config.${namespace}.user.name}.home}/.config/sops/age/keys.txt";
+        keyFile = "${user.home}/.config/sops/age/keys.txt";
+        sshKeyPaths = [
+          "/etc/ssh/ssh_host_ed25519_key"
+          "${user.home}/.ssh/id_ed25519"
+        ]
+        ++ cfg.sshKeyPaths;
       };
     };

     sops.secrets.builder_ssh_key = {
-      sopsFile = lib.snowfall.fs.get-file "secrets/default.yaml";
+      sopsFile = getFile "secrets/common/systems.yaml";
     };
   };
 }
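For context, a host would consume the reworked options roughly as follows. This is a minimal sketch: the host name in the secret path is made up, and "reichard" stands in for the flake's ${namespace} (inferred from the pkgs.reichard.* references later in this commit).

{ ... }:
{
  reichard.security.sops = {
    enable = true;
    # Path relative to the flake root; the module resolves it via
    # lib.snowfall.fs.get-file (defaults to secrets/systems/<name>.yaml).
    defaultSopsFile = "secrets/systems/example-host.yaml";
    # Appended to the built-in host and user key paths.
    sshKeyPaths = [ "/etc/ssh/ssh_host_rsa_key" ];
  };
}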
@@ -1,123 +0,0 @@
-{
-  config,
-  pkgs,
-  lib,
-  namespace,
-  ...
-}:
-let
-  inherit (lib) types mkIf mkEnableOption;
-  inherit (lib.${namespace}) mkOpt;
-  cfg = config.${namespace}.services.llama-cpp;
-
-  modelDir = "/models";
-  availableModels = {
-    "qwen2.5-coder-7b-q8_0.gguf" = {
-      url = "https://huggingface.co/ggml-org/Qwen2.5-Coder-7B-Q8_0-GGUF/resolve/main/qwen2.5-coder-7b-q8_0.gguf?download=true";
-      flag = "--fim-qwen-7b-default";
-    };
-    "qwen2.5-coder-3b-q8_0.gguf" = {
-      url = "https://huggingface.co/ggml-org/Qwen2.5-Coder-3B-Q8_0-GGUF/resolve/main/qwen2.5-coder-3b-q8_0.gguf?download=true";
-      flag = "--fim-qwen-3b-default";
-    };
-  };
-in
-{
-  options.${namespace}.services.llama-cpp = with types; {
-    enable = mkEnableOption "llama-cpp support";
-    modelName = mkOpt str "qwen2.5-coder-3b-q8_0.gguf" "model to use";
-  };
-
-  config =
-    let
-      modelPath = "${modelDir}/${cfg.modelName}";
-    in
-    mkIf cfg.enable {
-      assertions = [
-        {
-          assertion = availableModels ? ${cfg.modelName};
-          message = "Invalid model '${cfg.modelName}'. Available models: ${lib.concatStringsSep ", " (lib.attrNames availableModels)}";
-        }
-      ];
-
-      systemd.services = {
-        # LLama Download Model
-        download-model = {
-          description = "Download Model";
-          wantedBy = [ "multi-user.target" ];
-          before = [ "llama-cpp.service" ];
-          path = [
-            pkgs.curl
-            pkgs.coreutils
-          ];
-          serviceConfig = {
-            Type = "oneshot";
-            RemainAfterExit = true;
-            User = "root";
-            Group = "root";
-          };
-          script =
-            let
-              modelURL = availableModels.${cfg.modelName}.url;
-            in
-            ''
-              set -euo pipefail
-
-              if [ ! -f "${modelPath}" ]; then
-                mkdir -p "${modelDir}"
-                # -L follows redirects, -f/--fail exits non-zero on HTTP
-                # errors, and -C - resumes interrupted downloads
-                curl -f -L -C - \
-                  -H "Accept: application/octet-stream" \
-                  --retry 3 \
-                  --retry-delay 5 \
-                  --max-time 1800 \
-                  "${modelURL}" \
-                  -o "${modelPath}.tmp" && \
-                  mv "${modelPath}.tmp" "${modelPath}"
-              fi
-            '';
-        };
-
-        # Setup LLama API Service
-        llama-cpp = {
-          after = [ "download-model.service" ];
-          requires = [ "download-model.service" ];
-        };
-      };
-
-      services.llama-cpp = {
-        enable = true;
-        host = "0.0.0.0";
-        port = 8012;
-        openFirewall = true;
-        model = "${modelPath}";
-
-        package =
-          (pkgs.llama-cpp.override {
-            cudaSupport = true;
-            blasSupport = true;
-            rocmSupport = false;
-            metalSupport = false;
-          }).overrideAttrs
-            (oldAttrs: {
-              cmakeFlags = oldAttrs.cmakeFlags ++ [
-                "-DGGML_CUDA_ENABLE_UNIFIED_MEMORY=1"
-                "-DCMAKE_CUDA_ARCHITECTURES=61" # GTX-1070 / GTX-1080ti
-                "-DGGML_NATIVE=ON"
-
-                # Disable CPU Instructions - Intel(R) Core(TM) i5-3570K CPU @ 3.40GHz
-                # "-DLLAMA_FMA=OFF"
-                # "-DLLAMA_AVX2=OFF"
-                # "-DLLAMA_AVX512=OFF"
-                # "-DGGML_FMA=OFF"
-                # "-DGGML_AVX2=OFF"
-                # "-DGGML_AVX512=OFF"
-              ];
-            });
-
-        extraFlags = [ availableModels.${cfg.modelName}.flag ];
-      };
-    };
-}
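The standalone llama-cpp module above is deleted in favor of the llama-swap service added below, which serves the same FIM coder models (same --fim-qwen-* flags) alongside many others behind a single endpoint. Enabling the replacement is a one-liner; a sketch, again assuming the "reichard" namespace:

{ ... }:
{
  reichard.services.llama-swap.enable = true;
}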
507  modules/nixos/services/llama-swap/default.nix  (new file)
@@ -0,0 +1,507 @@
+{ config
+, lib
+, pkgs
+, namespace
+, ...
+}:
+let
+  inherit (lib) mkIf mkEnableOption;
+  cfg = config.${namespace}.services.llama-swap;
+
+  llama-swap = pkgs.reichard.llama-swap;
+  llama-cpp = pkgs.reichard.llama-cpp;
+  stable-diffusion-cpp = pkgs.reichard.stable-diffusion-cpp.override {
+    cudaSupport = true;
+  };
+in
+{
+  options.${namespace}.services.llama-swap = {
+    enable = mkEnableOption "enable llama-swap service";
+  };
+
+  config = mkIf cfg.enable {
+    # Create User
+    users.groups.llama-swap = { };
+    users.users.llama-swap = {
+      isSystemUser = true;
+      group = "llama-swap";
+    };
+
+    # Create Service
+    systemd.services.llama-swap = {
+      description = "Model swapping for llama.cpp (or any local OpenAI-compatible server)";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "exec";
+        ExecStart = "${lib.getExe llama-swap} --listen :8080 --config ${
+          config.sops.templates."llama-swap.json".path
+        }";
+        Restart = "on-failure";
+        RestartSec = 3;
+
+        # for GPU acceleration
+        PrivateDevices = false;
+
+        # hardening
+        User = "llama-swap";
+        Group = "llama-swap";
+        CapabilityBoundingSet = "";
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          "AF_UNIX"
+        ];
+        NoNewPrivileges = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        MemoryDenyWriteExecute = true;
+        LimitMEMLOCK = "infinity";
+        LockPersonality = true;
+        RemoveIPC = true;
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+        ];
+        SystemCallErrorNumber = "EPERM";
+        ProtectProc = "invisible";
+        ProtectHostname = true;
+        ProcSubset = "pid";
+      };
+    };
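Note that ExecStart points at a sops-nix template rather than a file in the Nix store: the config is rendered at activation time with the decrypted secret substituted in, so the API key embedded further down never lands in the world-readable store. A minimal sketch of that mechanism, with illustrative names:

{ config, ... }:
{
  sops.secrets.example_apikey = { };
  sops.templates."example.json" = {
    # sops-nix replaces the placeholder with the decrypted secret when
    # rendering the file at activation time.
    content = builtins.toJSON {
      apiKey = config.sops.placeholder.example_apikey;
    };
  };
}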
+
+    # Create Config
+    sops = {
+      secrets.synthetic_apikey = {
+        sopsFile = lib.snowfall.fs.get-file "secrets/common/systems.yaml";
+      };
+      templates."llama-swap.json" = {
+        owner = "llama-swap";
+        group = "llama-swap";
+        mode = "0400";
+        content = builtins.toJSON {
+          models = {
+            # https://huggingface.co/unsloth/Devstral-Small-2-24B-Instruct-2512-GGUF/tree/main
+            "devstral-small-2-instruct" = {
+              name = "Devstral Small 2 (24B) - Instruct";
+              cmd = ''
+                ${llama-cpp}/bin/llama-server \
+                  --port ''${PORT} \
+                  -m /mnt/ssd/Models/Devstral/Devstral-Small-2-24B-Instruct-2512-UD-Q4_K_XL.gguf \
+                  --chat-template-file /mnt/ssd/Models/Devstral/Devstral-Small-2-24B-Instruct-2512-UD-Q4_K_XL_template.jinja \
+                  --temp 0.15 \
+                  -c 98304 \
+                  -ctk q8_0 \
+                  -ctv q8_0 \
+                  -fit off \
+                  -dev CUDA0
+              '';
+              metadata = {
+                type = [ "text-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            # https://huggingface.co/mradermacher/gpt-oss-20b-heretic-v2-i1-GGUF/tree/main
+            "gpt-oss-20b-thinking" = {
+              name = "GPT OSS (20B) - Thinking";
+              cmd = ''
+                ${llama-cpp}/bin/llama-server \
+                  --port ''${PORT} \
+                  -m /mnt/ssd/Models/GPT-OSS/gpt-oss-20b-heretic-v2.i1-MXFP4_MOE.gguf \
+                  -c 131072 \
+                  --temp 1.0 \
+                  --top-p 1.0 \
+                  --top-k 40 \
+                  -dev CUDA0
+              '';
+              metadata = {
+                type = [ "text-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            # https://huggingface.co/mradermacher/GPT-OSS-Cybersecurity-20B-Merged-i1-GGUF/tree/main
+            "gpt-oss-csec-20b-thinking" = {
+              name = "GPT OSS CSEC (20B) - Thinking";
+              cmd = ''
+                ${llama-cpp}/bin/llama-server \
+                  --port ''${PORT} \
+                  -m /mnt/ssd/Models/GPT-OSS/GPT-OSS-Cybersecurity-20B-Merged.i1-MXFP4_MOE.gguf \
+                  -c 131072 \
+                  --temp 1.0 \
+                  --top-p 1.0 \
+                  --top-k 40 \
+                  -dev CUDA0
+              '';
+              metadata = {
+                type = [ "text-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            # https://huggingface.co/unsloth/Qwen3-Next-80B-A3B-Instruct-GGUF/tree/main
+            "qwen3-next-80b-instruct" = {
+              name = "Qwen3 Next (80B) - Instruct";
+              cmd = ''
+                ${llama-cpp}/bin/llama-server \
+                  --port ''${PORT} \
+                  -m /mnt/ssd/Models/Qwen3/Qwen3-Next-80B-A3B-Instruct-UD-Q2_K_XL.gguf \
+                  -c 262144 \
+                  --temp 0.7 \
+                  --min-p 0.0 \
+                  --top-p 0.8 \
+                  --top-k 20 \
+                  --repeat-penalty 1.05 \
+                  -ctk q8_0 \
+                  -ctv q8_0 \
+                  -fit off
+              '';
+              metadata = {
+                type = [ "text-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            # https://huggingface.co/unsloth/Qwen3-30B-A3B-Instruct-2507-GGUF/tree/main
+            "qwen3-30b-2507-instruct" = {
+              name = "Qwen3 2507 (30B) - Instruct";
+              cmd = ''
+                ${llama-cpp}/bin/llama-server \
+                  --port ''${PORT} \
+                  -m /mnt/ssd/Models/Qwen3/Qwen3-30B-A3B-Instruct-2507-Q4_K_M.gguf \
+                  -c 262144 \
+                  --temp 0.7 \
+                  --min-p 0.0 \
+                  --top-p 0.8 \
+                  --top-k 20 \
+                  --repeat-penalty 1.05 \
+                  -ctk q8_0 \
+                  -ctv q8_0 \
+                  -ts 70,30 \
+                  -fit off
+              '';
+              metadata = {
+                type = [ "text-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            # https://huggingface.co/unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF/tree/main
+            "qwen3-coder-30b-instruct" = {
+              name = "Qwen3 Coder (30B) - Instruct";
+              cmd = ''
+                ${llama-cpp}/bin/llama-server \
+                  --port ''${PORT} \
+                  -m /mnt/ssd/Models/Qwen3/Qwen3-Coder-30B-A3B-Instruct-UD-Q6_K_XL.gguf \
+                  -c 131072 \
+                  --temp 0.7 \
+                  --min-p 0.0 \
+                  --top-p 0.8 \
+                  --top-k 20 \
+                  --repeat-penalty 1.05 \
+                  -ctk q8_0 \
+                  -ctv q8_0 \
+                  -ts 70,30 \
+                  -fit off
+              '';
+              metadata = {
+                type = [ "text-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            # https://huggingface.co/unsloth/Qwen3-30B-A3B-Thinking-2507-GGUF/tree/main
+            "qwen3-30b-2507-thinking" = {
+              name = "Qwen3 2507 (30B) - Thinking";
+              cmd = ''
+                ${llama-cpp}/bin/llama-server \
+                  --port ''${PORT} \
+                  -m /mnt/ssd/Models/Qwen3/Qwen3-30B-A3B-Thinking-2507-UD-Q4_K_XL.gguf \
+                  -c 262144 \
+                  --temp 0.7 \
+                  --min-p 0.0 \
+                  --top-p 0.8 \
+                  --top-k 20 \
+                  --repeat-penalty 1.05 \
+                  -ctk q8_0 \
+                  -ctv q8_0 \
+                  -ts 70,30 \
+                  -fit off
+              '';
+              metadata = {
+                type = [ "text-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            # https://huggingface.co/unsloth/Nemotron-3-Nano-30B-A3B-GGUF/tree/main
+            "nemotron-3-nano-30b-thinking" = {
+              name = "Nemotron 3 Nano (30B) - Thinking";
+              cmd = ''
+                ${llama-cpp}/bin/llama-server \
+                  --port ''${PORT} \
+                  -m /mnt/ssd/Models/Nemotron/Nemotron-3-Nano-30B-A3B-UD-Q4_K_XL.gguf \
+                  -c 1048576 \
+                  --temp 1.1 \
+                  --top-p 0.95 \
+                  -fit off
+              '';
+              metadata = {
+                type = [ "text-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            # https://huggingface.co/unsloth/Qwen3-VL-8B-Instruct-GGUF/tree/main
+            "qwen3-8b-vision" = {
name = "Qwen3 Vision (8B) - Thinking";
|
||||
+              cmd = ''
+                ${llama-cpp}/bin/llama-server \
+                  --port ''${PORT} \
+                  -m /mnt/ssd/Models/Qwen3/Qwen3-VL-8B-Instruct-UD-Q4_K_XL.gguf \
+                  --mmproj /mnt/ssd/Models/Qwen3/Qwen3-VL-8B-Instruct-UD-Q4_K_XL_mmproj-F16.gguf \
+                  -c 65536 \
+                  --temp 0.7 \
+                  --min-p 0.0 \
+                  --top-p 0.8 \
+                  --top-k 20 \
+                  -ctk q8_0 \
+                  -ctv q8_0 \
+                  -fit off \
+                  -dev CUDA1
+              '';
+              metadata = {
+                type = [ "text-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            # https://huggingface.co/unsloth/Qwen2.5-Coder-7B-Instruct-128K-GGUF/tree/main
+            "qwen2.5-coder-7b-instruct" = {
+              name = "Qwen2.5 Coder (7B) - Instruct";
+              cmd = ''
+                ${llama-cpp}/bin/llama-server \
+                  -m /mnt/ssd/Models/Qwen2.5/Qwen2.5-Coder-7B-Instruct-Q8_0.gguf \
+                  --fim-qwen-7b-default \
+                  -c 131072 \
+                  --port ''${PORT} \
+                  -fit off \
+                  -dev CUDA1
+              '';
+              metadata = {
+                type = [ "text-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            # https://huggingface.co/unsloth/Qwen2.5-Coder-3B-Instruct-128K-GGUF/tree/main
+            "qwen2.5-coder-3b-instruct" = {
+              name = "Qwen2.5 Coder (3B) - Instruct";
+              cmd = ''
+                ${llama-cpp}/bin/llama-server \
+                  -m /mnt/ssd/Models/Qwen2.5/Qwen2.5-Coder-3B-Instruct-Q8_0.gguf \
+                  --fim-qwen-3b-default \
+                  --port ''${PORT} \
+                  -fit off \
+                  -dev CUDA1
+              '';
+              metadata = {
+                type = [ "text-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            # https://huggingface.co/unsloth/Qwen3-4B-Instruct-2507-GGUF/tree/main
+            "qwen3-4b-2507-instruct" = {
+              name = "Qwen3 2507 (4B) - Instruct";
+              cmd = ''
+                ${llama-cpp}/bin/llama-server \
+                  --port ''${PORT} \
+                  -m /mnt/ssd/Models/Qwen3/Qwen3-4B-Instruct-2507-Q4_K_M.gguf \
+                  -c 98304 \
+                  -fit off \
+                  -ctk q8_0 \
+                  -ctv q8_0 \
+                  -dev CUDA1
+              '';
+              metadata = {
+                type = [ "text-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            "z-image-turbo" = {
+              name = "Z-Image-Turbo";
+              checkEndpoint = "/";
+              cmd = ''
+                ${stable-diffusion-cpp}/bin/sd-server \
+                  --listen-port ''${PORT} \
+                  --diffusion-fa \
+                  --diffusion-model /mnt/ssd/StableDiffusion/ZImageTurbo/z-image-turbo-Q8_0.gguf \
+                  --vae /mnt/ssd/StableDiffusion/ZImageTurbo/ae.safetensors \
+                  --llm /mnt/ssd/Models/Qwen3/Qwen3-4B-Instruct-2507-Q4_K_M.gguf \
+                  --cfg-scale 1.0 \
+                  --steps 8 \
+                  --rng cuda
+              '';
+              metadata = {
+                type = [ "image-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            # https://huggingface.co/unsloth/Qwen-Image-Edit-2511-GGUF/tree/main
+            "qwen-image-edit-2511" = {
+              name = "Qwen Image Edit 2511";
+              checkEndpoint = "/";
+              cmd = ''
+                ${stable-diffusion-cpp}/bin/sd-server \
+                  --listen-port ''${PORT} \
+                  --diffusion-fa \
+                  --qwen-image-zero-cond-t \
+                  --diffusion-model /mnt/ssd/StableDiffusion/QwenImage/qwen-image-edit-2511-Q5_K_M.gguf \
+                  --vae /mnt/ssd/StableDiffusion/QwenImage/qwen_image_vae.safetensors \
+                  --llm /mnt/ssd/Models/Qwen2.5/Qwen2.5-VL-7B-Instruct.Q4_K_M.gguf \
+                  --lora-model-dir /mnt/ssd/StableDiffusion/QwenImage/Loras \
+                  --cfg-scale 2.5 \
+                  --sampling-method euler \
+                  --flow-shift 3 \
+                  --steps 20 \
+                  --rng cuda
+              '';
+              metadata = {
+                type = [
+                  "image-edit"
+                  "image-generation"
+                ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            "qwen-image-2512" = {
+              name = "Qwen Image 2512";
+              checkEndpoint = "/";
+              cmd = ''
+                ${stable-diffusion-cpp}/bin/sd-server \
+                  --listen-port ''${PORT} \
+                  --diffusion-fa \
+                  --diffusion-model /mnt/ssd/StableDiffusion/QwenImage/qwen-image-2512-Q5_K_M.gguf \
+                  --vae /mnt/ssd/StableDiffusion/QwenImage/qwen_image_vae.safetensors \
+                  --llm /mnt/ssd/Models/Qwen2.5/Qwen2.5-VL-7B-Instruct.Q4_K_M.gguf \
+                  --lora-model-dir /mnt/ssd/StableDiffusion/QwenImage/Loras \
+                  --cfg-scale 2.5 \
+                  --sampling-method euler \
+                  --flow-shift 3 \
+                  --steps 20 \
+                  --rng cuda
+              '';
+              metadata = {
+                type = [ "image-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+
+            "chroma-radiance" = {
+              name = "Chroma Radiance";
+              checkEndpoint = "/";
+              cmd = ''
+                ${stable-diffusion-cpp}/bin/sd-server \
+                  --listen-port ''${PORT} \
+                  --diffusion-fa --chroma-disable-dit-mask \
+                  --diffusion-model /mnt/ssd/StableDiffusion/Chroma/chroma_radiance_x0_q8.gguf \
+                  --t5xxl /mnt/ssd/StableDiffusion/Chroma/t5xxl_fp16.safetensors \
+                  --cfg-scale 4.0 \
+                  --sampling-method euler \
+                  --rng cuda
+              '';
+              metadata = {
+                type = [ "image-generation" ];
+              };
+              env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
+            };
+          };
+
+          groups = {
+            shared = {
+              swap = true;
+              exclusive = false;
+              members = [
+                "nemotron-3-nano-30b-thinking"
+                "qwen3-30b-2507-instruct"
+                "qwen3-30b-2507-thinking"
+                "qwen3-coder-30b-instruct"
+                "qwen3-next-80b-instruct"
+              ];
+            };
+
+            cuda0 = {
+              swap = true;
+              exclusive = false;
+              members = [
+                "devstral-small-2-instruct"
+                "gpt-oss-20b-thinking"
+                "gpt-oss-csec-20b-thinking"
+              ];
+            };
+
+            cuda1 = {
+              swap = true;
+              exclusive = false;
+              members = [
+                "qwen2.5-coder-3b-instruct"
+                "qwen2.5-coder-7b-instruct"
+                "qwen3-4b-2507-instruct"
+                "qwen3-8b-vision"
+              ];
+            };
+          };
+
+          peers = {
+            synthetic = {
+              proxy = "https://api.synthetic.new/openai/";
+              apiKey = "${config.sops.placeholder.synthetic_apikey}";
+              models = [
+                "hf:deepseek-ai/DeepSeek-R1-0528"
+                "hf:deepseek-ai/DeepSeek-V3"
+                "hf:deepseek-ai/DeepSeek-V3-0324"
+                "hf:deepseek-ai/DeepSeek-V3.1"
+                "hf:deepseek-ai/DeepSeek-V3.1-Terminus"
+                "hf:deepseek-ai/DeepSeek-V3.2"
+                "hf:meta-llama/Llama-3.3-70B-Instruct"
+                "hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
+                "hf:MiniMaxAI/MiniMax-M2"
+                "hf:MiniMaxAI/MiniMax-M2.1"
+                "hf:moonshotai/Kimi-K2-Instruct-0905"
+                "hf:moonshotai/Kimi-K2-Thinking"
+                "hf:openai/gpt-oss-120b"
+                "hf:Qwen/Qwen3-235B-A22B-Instruct-2507"
+                "hf:Qwen/Qwen3-235B-A22B-Thinking-2507"
+                "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct"
+                "hf:Qwen/Qwen3-VL-235B-A22B-Instruct"
+                "hf:zai-org/GLM-4.5"
+                "hf:zai-org/GLM-4.6"
+                "hf:zai-org/GLM-4.7"
+              ];
+            };
+          };
+        };
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = [ 8080 ];
+  };
+}
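A note on the ''${PORT} spelling used in every cmd above: inside a Nix indented string, ''${ escapes interpolation, so Nix emits a literal ${PORT} token for llama-swap to substitute at runtime, while ${llama-cpp} is still interpolated at eval time. A one-line illustration:

let cmd = ''--port ''${PORT}''; in cmd
# evaluates to the string "--port ${PORT}"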
@@ -14,16 +14,11 @@ let
   cfg = config.${namespace}.services.openssh;

   globalKeys = [
-    # evanreichard@lin-va-mbp-personal
-    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILJJoyXQOv9cAjGUHrUcvsW7vY9W0PmuPMQSI9AMZvNY"
-    # evanreichard@mac-va-mbp-personal
-    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMWj6rd6uDtHj/gGozgIEgxho/vBKebgN5Kce/N6vQWV"
-    # evanreichard@lin-va-thinkpad
-    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAq5JQr/6WJMIHhR434nK95FrDmf2ApW2Ahd2+cBKwDz"
-    # evanreichard@lin-va-terminal
-    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM5e6Cty+7rX5BjIEHBTU6GnzfOxPJiHpSqin/BnsypO"
-    # evanreichard@mobile
-    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIARTNbl4lgQsp7SJEng7vprL0+ChC9e6iR7o/PiC4Jme"
+    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILJJoyXQOv9cAjGUHrUcvsW7vY9W0PmuPMQSI9AMZvNY evanreichard@lin-va-mbp-personal"
+    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMWj6rd6uDtHj/gGozgIEgxho/vBKebgN5Kce/N6vQWV evanreichard@mac-va-mbp-personal"
+    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAq5JQr/6WJMIHhR434nK95FrDmf2ApW2Ahd2+cBKwDz evanreichard@lin-va-thinkpad"
+    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM5e6Cty+7rX5BjIEHBTU6GnzfOxPJiHpSqin/BnsypO evanreichard@lin-va-terminal"
+    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIARTNbl4lgQsp7SJEng7vprL0+ChC9e6iR7o/PiC4Jme evanreichard@mobile"
   ];
 in
 {
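For reference, globalKeys is presumably consumed elsewhere in this module (outside the hunk), along the lines of the hypothetical sketch below:

# Hypothetical consumer; the actual attribute path is not shown in this diff.
users.users.${config.${namespace}.user.name}.openssh.authorizedKeys.keys = globalKeys;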