diff --git a/modules/nixos/services/llama-swap/config.nix b/modules/nixos/services/llama-swap/config.nix index d0ae157..49b5be1 100644 --- a/modules/nixos/services/llama-swap/config.nix +++ b/modules/nixos/services/llama-swap/config.nix @@ -110,8 +110,6 @@ in vllmCmd = '' set -e; pip install xxhash pandas scipy -q; python3 -m vllm._genesis.patches.apply_all; - python3 /patches/patch_tolist_cudagraph.py; - python3 /patches/patch_inputs_embeds_optional.py; python3 /patches/patch_timings_07351e088.py; exec vllm serve --served-model-name ''${MODEL_ID} @@ -142,27 +140,21 @@ in ${pkgs.docker}/bin/docker run --rm --device=nvidia.com/gpu=all \ --name ''${MODEL_ID} \ --ipc=host \ - -e VLLM_WORKER_MULTIPROC_METHOD=spawn \ - -e NCCL_CUMEM_ENABLE=0 \ - -e NCCL_P2P_DISABLE=1 \ - -e VLLM_MEMORY_PROFILER_ESTIMATE_CUDAGRAPHS=0 \ - -e VLLM_NO_USAGE_STATS=1 \ - -e PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True,max_split_size_mb:512 \ - -e VLLM_FLOAT32_MATMUL_PRECISION=high \ - -e VLLM_USE_FLASHINFER_SAMPLER=1 \ - -e OMP_NUM_THREADS=1 \ -e CUDA_DEVICE_MAX_CONNECTIONS=8 \ - -e CUDA_VISIBLE_DEVICES=0 \ -e CUDA_DEVICE_ORDER=PCI_BUS_ID \ - -e VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 \ - -e VLLM_MARLIN_USE_ATOMIC_ADD=1 \ - -e TRITON_CACHE_DIR=/root/.triton/cache \ + -e CUDA_VISIBLE_DEVICES=0 \ + -e GENESIS_BUFFER_MODE=shared \ + -e GENESIS_ENABLE_P100=1 \ + -e GENESIS_ENABLE_P101=1 \ + -e GENESIS_ENABLE_P103=1 \ + -e GENESIS_ENABLE_P15B_FA_VARLEN_CLAMP=1 \ + -e GENESIS_ENABLE_P38B_COMPILE_SAFE=1 \ -e GENESIS_ENABLE_P4=1 \ -e GENESIS_ENABLE_P58_ASYNC_PLACEHOLDER_FIX=1 \ - -e GENESIS_ENABLE_P60_GDN_NGRAM_FIX=1 \ -e GENESIS_ENABLE_P60B_TRITON_KERNEL=1 \ - -e GENESIS_ENABLE_P61_QWEN3_MULTI_TOOL=1 \ + -e GENESIS_ENABLE_P60_GDN_NGRAM_FIX=1 \ -e GENESIS_ENABLE_P61B_STREAMING_OVERLAP=1 \ + -e GENESIS_ENABLE_P61_QWEN3_MULTI_TOOL=1 \ -e GENESIS_ENABLE_P62_STRUCT_OUT_SPEC_TIMING=1 \ -e GENESIS_ENABLE_P64_QWEN3CODER_MTP_STREAMING=1 \ -e GENESIS_ENABLE_P65_TURBOQUANT_SPEC_CG_DOWNGRADE=1 \ @@ -170,59 +162,65 
@@ in -e GENESIS_ENABLE_P67_TQ_MULTI_QUERY_KERNEL=1 \ -e GENESIS_ENABLE_P68_AUTO_FORCE_TOOL=1 \ -e GENESIS_ENABLE_P69_LONG_CTX_TOOL_REMINDER=1 \ - -e GENESIS_P68_P69_LONG_CTX_THRESHOLD_CHARS=50000 \ -e GENESIS_ENABLE_P72_PROFILE_RUN_CAP=1 \ - -e GENESIS_PROFILE_RUN_CAP_M=4128 \ -e GENESIS_ENABLE_P74_CHUNK_CLAMP=1 \ + -e GENESIS_ENABLE_P78_TOLIST_CAPTURE_GUARD=0 \ + -e GENESIS_ENABLE_P81_FP8_BLOCK_SCALED_M_LE_8=0 \ + -e GENESIS_ENABLE_P82=0 \ -e GENESIS_ENABLE_P83=1 \ -e GENESIS_ENABLE_P87=1 \ -e GENESIS_ENABLE_P91=1 \ -e GENESIS_ENABLE_P94=1 \ -e GENESIS_ENABLE_P98=1 \ -e GENESIS_ENABLE_P99=1 \ - -e GENESIS_ENABLE_P100=1 \ - -e GENESIS_ENABLE_P101=1 \ - -e GENESIS_ENABLE_P103=1 \ - -e GENESIS_ENABLE_PN32_GDN_CHUNKED_PREFILL=1 \ - -e GENESIS_PN32_GDN_CHUNK_SIZE=8192 \ - -e GENESIS_PN32_GDN_CHUNK_THRESHOLD=16384 \ - -e GENESIS_FLA_FWD_H_MAX_T=16384 \ - -e GENESIS_ENABLE_PN34_WORKSPACE_LOCK_RELAX=1 \ - -e GENESIS_ENABLE_PN8_MTP_DRAFT_ONLINE_QUANT=1 \ - -e GENESIS_ENABLE_PN9_INDEPENDENT_DRAFTER_ATTN=1 \ -e GENESIS_ENABLE_PN11_GDN_AB_CONTIGUOUS=1 \ -e GENESIS_ENABLE_PN12_FFN_INTERMEDIATE_POOL=1 \ -e GENESIS_ENABLE_PN13_CUDA_GRAPH_LAMBDA_ARITY=1 \ -e GENESIS_ENABLE_PN14_TQ_DECODE_OOB_CLAMP=1 \ -e GENESIS_ENABLE_PN17_FA2_LSE_CLAMP=1 \ - -e GENESIS_ENABLE_PN25_SILU_INDUCTOR_SAFE=1 \ - -e GENESIS_ENABLE_PN30_DS_LAYOUT_SPEC_DECODE=1 \ - -e GENESIS_PREALLOC_TOKEN_BUDGET=4128 \ - -e GENESIS_BUFFER_MODE=shared \ - -e GENESIS_ENABLE_P78_TOLIST_CAPTURE_GUARD=0 \ - -e GENESIS_ENABLE_P81_FP8_BLOCK_SCALED_M_LE_8=0 \ - -e GENESIS_ENABLE_P82=0 \ - -e GENESIS_P82_THRESHOLD_SINGLE=0.3 \ -e GENESIS_ENABLE_PN19_SCOPED_MAX_SPLIT=1 \ -e GENESIS_ENABLE_PN22_LOCAL_ARGMAX_TP=1 \ + -e GENESIS_ENABLE_PN25_SILU_INDUCTOR_SAFE=1 \ -e GENESIS_ENABLE_PN26_SPARSE_V=1 \ + -e GENESIS_ENABLE_PN30_DS_LAYOUT_SPEC_DECODE=1 \ + -e GENESIS_ENABLE_PN32_GDN_CHUNKED_PREFILL=1 \ + -e GENESIS_ENABLE_PN34_WORKSPACE_LOCK_RELAX=1 \ + -e GENESIS_ENABLE_PN59_STREAMING_GDN=1 \ + -e 
GENESIS_ENABLE_PN8_MTP_DRAFT_ONLINE_QUANT=1 \ + -e GENESIS_ENABLE_PN9_INDEPENDENT_DRAFTER_ATTN=1 \ + -e GENESIS_FLA_FWD_H_MAX_T=16384 \ + -e GENESIS_P68_P69_LONG_CTX_THRESHOLD_CHARS=50000 \ + -e GENESIS_P82_THRESHOLD_SINGLE=0.3 \ -e GENESIS_PN26_SPARSE_V_BLOCK_KV=8 \ -e GENESIS_PN26_SPARSE_V_NUM_WARPS=4 \ -e GENESIS_PN26_SPARSE_V_THRESHOLD=0.01 \ - -e GENESIS_ENABLE_P38B_COMPILE_SAFE=1 \ - -e GENESIS_ENABLE_P15B_FA_VARLEN_CLAMP=1 \ + -e GENESIS_PN32_GDN_CHUNK_SIZE=8192 \ + -e GENESIS_PN32_GDN_CHUNK_THRESHOLD=16384 \ + -e GENESIS_PN59_DEBUG=1 \ + -e GENESIS_PREALLOC_TOKEN_BUDGET=4128 \ + -e GENESIS_PROFILE_RUN_CAP_M=4128 \ + -e NCCL_CUMEM_ENABLE=0 \ + -e NCCL_P2P_DISABLE=1 \ + -e OMP_NUM_THREADS=1 \ + -e PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True,max_split_size_mb:512 \ + -e TRITON_CACHE_DIR=/root/.triton/cache \ + -e VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 \ + -e VLLM_FLOAT32_MATMUL_PRECISION=high \ + -e VLLM_MARLIN_USE_ATOMIC_ADD=1 \ + -e VLLM_MEMORY_PROFILER_ESTIMATE_CUDAGRAPHS=0 \ + -e VLLM_NO_USAGE_STATS=1 \ -e VLLM_SSM_CONV_STATE_LAYOUT=DS \ + -e VLLM_USE_FLASHINFER_SAMPLER=1 \ -e VLLM_USE_FUSED_MOE_GROUPED_TOPK=1 \ + -e VLLM_WORKER_MULTIPROC_METHOD=spawn \ -v /mnt/ssd/vLLM/Models:/root/.cache/huggingface \ -v /mnt/ssd/vLLM/Cache/torch_compile:/root/.cache/vllm/torch_compile_cache \ -v /mnt/ssd/vLLM/Cache/triton:/root/.triton/cache \ -v /mnt/ssd/vLLM/Patches/genesis/vllm/_genesis:/usr/local/lib/python3.12/dist-packages/vllm/_genesis:ro \ - -v /mnt/ssd/vLLM/Patches/patch_tolist_cudagraph.py:/patches/patch_tolist_cudagraph.py:ro \ - -v /mnt/ssd/vLLM/Patches/patch_inputs_embeds_optional.py:/patches/patch_inputs_embeds_optional.py:ro \ -v /mnt/ssd/vLLM/Patches/patch_timings_07351e088.py:/patches/patch_timings_07351e088.py:ro \ -p ''${PORT}:8000 \ --entrypoint /bin/bash \ - vllm/vllm-openai:nightly-7a1eb8ac2ec4ea69338c51dc7afd4b15010abfa8 \ + vllm/vllm-openai:nightly-01d4d1ad375dc5854779c593eee093bcebb0cada \ -c "${vllmCmdFlat}" ''; cmdStop = 
"${pkgs.docker}/bin/docker stop \${MODEL_ID}"; @@ -248,7 +246,6 @@ in vllmCmd = '' set -e; pip install xxhash pandas scipy -q; python3 -m vllm._genesis.patches.apply_all; - python3 /patches/patch_tolist_cudagraph.py; python3 /patches/patch_timings_07351e088.py; exec vllm serve --served-model-name ''${MODEL_ID} @@ -278,27 +275,21 @@ in ${pkgs.docker}/bin/docker run --rm --device=nvidia.com/gpu=all \ --name ''${MODEL_ID} \ --ipc=host \ - -e VLLM_WORKER_MULTIPROC_METHOD=spawn \ - -e NCCL_CUMEM_ENABLE=0 \ - -e NCCL_P2P_DISABLE=1 \ - -e VLLM_MEMORY_PROFILER_ESTIMATE_CUDAGRAPHS=0 \ - -e VLLM_NO_USAGE_STATS=1 \ - -e PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True,max_split_size_mb:512 \ - -e VLLM_FLOAT32_MATMUL_PRECISION=high \ - -e VLLM_USE_FLASHINFER_SAMPLER=1 \ - -e OMP_NUM_THREADS=1 \ -e CUDA_DEVICE_MAX_CONNECTIONS=8 \ - -e CUDA_VISIBLE_DEVICES=0 \ -e CUDA_DEVICE_ORDER=PCI_BUS_ID \ - -e VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 \ - -e VLLM_MARLIN_USE_ATOMIC_ADD=1 \ - -e TRITON_CACHE_DIR=/root/.triton/cache \ + -e CUDA_VISIBLE_DEVICES=0 \ + -e GENESIS_BUFFER_MODE=shared \ + -e GENESIS_ENABLE_P100=1 \ + -e GENESIS_ENABLE_P101=1 \ + -e GENESIS_ENABLE_P103=1 \ + -e GENESIS_ENABLE_P15B_FA_VARLEN_CLAMP=1 \ + -e GENESIS_ENABLE_P38B_COMPILE_SAFE=1 \ -e GENESIS_ENABLE_P4=1 \ -e GENESIS_ENABLE_P58_ASYNC_PLACEHOLDER_FIX=1 \ - -e GENESIS_ENABLE_P60_GDN_NGRAM_FIX=1 \ -e GENESIS_ENABLE_P60B_TRITON_KERNEL=1 \ - -e GENESIS_ENABLE_P61_QWEN3_MULTI_TOOL=1 \ + -e GENESIS_ENABLE_P60_GDN_NGRAM_FIX=1 \ -e GENESIS_ENABLE_P61B_STREAMING_OVERLAP=1 \ + -e GENESIS_ENABLE_P61_QWEN3_MULTI_TOOL=1 \ -e GENESIS_ENABLE_P62_STRUCT_OUT_SPEC_TIMING=1 \ -e GENESIS_ENABLE_P64_QWEN3CODER_MTP_STREAMING=1 \ -e GENESIS_ENABLE_P65_TURBOQUANT_SPEC_CG_DOWNGRADE=1 \ @@ -306,26 +297,17 @@ in -e GENESIS_ENABLE_P67_TQ_MULTI_QUERY_KERNEL=1 \ -e GENESIS_ENABLE_P68_AUTO_FORCE_TOOL=1 \ -e GENESIS_ENABLE_P69_LONG_CTX_TOOL_REMINDER=1 \ - -e GENESIS_P68_P69_LONG_CTX_THRESHOLD_CHARS=50000 \ -e GENESIS_ENABLE_P72_PROFILE_RUN_CAP=1 \ - 
-e GENESIS_PROFILE_RUN_CAP_M=4128 \ -e GENESIS_ENABLE_P74_CHUNK_CLAMP=1 \ + -e GENESIS_ENABLE_P78_TOLIST_CAPTURE_GUARD=0 \ + -e GENESIS_ENABLE_P81_FP8_BLOCK_SCALED_M_LE_8=0 \ + -e GENESIS_ENABLE_P82=0 \ -e GENESIS_ENABLE_P83=1 \ -e GENESIS_ENABLE_P87=1 \ -e GENESIS_ENABLE_P91=1 \ -e GENESIS_ENABLE_P94=1 \ -e GENESIS_ENABLE_P98=1 \ -e GENESIS_ENABLE_P99=1 \ - -e GENESIS_ENABLE_P100=1 \ - -e GENESIS_ENABLE_P101=1 \ - -e GENESIS_ENABLE_P103=1 \ - -e GENESIS_ENABLE_PN32_GDN_CHUNKED_PREFILL=1 \ - -e GENESIS_PN32_GDN_CHUNK_SIZE=8192 \ - -e GENESIS_PN32_GDN_CHUNK_THRESHOLD=16384 \ - -e GENESIS_FLA_FWD_H_MAX_T=16384 \ - -e GENESIS_ENABLE_PN34_WORKSPACE_LOCK_RELAX=1 \ - -e GENESIS_ENABLE_PN8_MTP_DRAFT_ONLINE_QUANT=1 \ - -e GENESIS_ENABLE_PN9_INDEPENDENT_DRAFTER_ATTN=1 \ -e GENESIS_ENABLE_PN11_GDN_AB_CONTIGUOUS=1 \ -e GENESIS_ENABLE_PN12_FFN_INTERMEDIATE_POOL=1 \ -e GENESIS_ENABLE_PN13_CUDA_GRAPH_LAMBDA_ARITY=1 \ @@ -333,31 +315,46 @@ in -e GENESIS_ENABLE_PN17_FA2_LSE_CLAMP=1 \ -e GENESIS_ENABLE_PN19_SCOPED_MAX_SPLIT=1 \ -e GENESIS_ENABLE_PN22_LOCAL_ARGMAX_TP=1 \ + -e GENESIS_ENABLE_PN25_SILU_INDUCTOR_SAFE=1 \ -e GENESIS_ENABLE_PN26_SPARSE_V=1 \ + -e GENESIS_ENABLE_PN30_DS_LAYOUT_SPEC_DECODE=1 \ + -e GENESIS_ENABLE_PN32_GDN_CHUNKED_PREFILL=1 \ + -e GENESIS_ENABLE_PN34_WORKSPACE_LOCK_RELAX=1 \ + -e GENESIS_ENABLE_PN59_STREAMING_GDN=1 \ + -e GENESIS_ENABLE_PN8_MTP_DRAFT_ONLINE_QUANT=1 \ + -e GENESIS_ENABLE_PN9_INDEPENDENT_DRAFTER_ATTN=1 \ + -e GENESIS_FLA_FWD_H_MAX_T=16384 \ + -e GENESIS_P68_P69_LONG_CTX_THRESHOLD_CHARS=50000 \ + -e GENESIS_P82_THRESHOLD_SINGLE=0.3 \ -e GENESIS_PN26_SPARSE_V_BLOCK_KV=8 \ -e GENESIS_PN26_SPARSE_V_NUM_WARPS=4 \ -e GENESIS_PN26_SPARSE_V_THRESHOLD=0.01 \ - -e GENESIS_ENABLE_P38B_COMPILE_SAFE=1 \ - -e GENESIS_ENABLE_P15B_FA_VARLEN_CLAMP=1 \ - -e GENESIS_ENABLE_PN25_SILU_INDUCTOR_SAFE=1 \ - -e GENESIS_ENABLE_PN30_DS_LAYOUT_SPEC_DECODE=1 \ + -e GENESIS_PN32_GDN_CHUNK_SIZE=8192 \ + -e GENESIS_PN32_GDN_CHUNK_THRESHOLD=16384 \ -e 
GENESIS_PREALLOC_TOKEN_BUDGET=4128 \ - -e GENESIS_BUFFER_MODE=shared \ - -e GENESIS_ENABLE_P78_TOLIST_CAPTURE_GUARD=0 \ - -e GENESIS_ENABLE_P81_FP8_BLOCK_SCALED_M_LE_8=0 \ - -e GENESIS_ENABLE_P82=0 \ - -e GENESIS_P82_THRESHOLD_SINGLE=0.3 \ + -e GENESIS_PROFILE_RUN_CAP_M=4128 \ + -e NCCL_CUMEM_ENABLE=0 \ + -e NCCL_P2P_DISABLE=1 \ + -e OMP_NUM_THREADS=1 \ + -e PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True,max_split_size_mb:512 \ + -e TRITON_CACHE_DIR=/root/.triton/cache \ + -e VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 \ + -e VLLM_FLOAT32_MATMUL_PRECISION=high \ + -e VLLM_MARLIN_USE_ATOMIC_ADD=1 \ + -e VLLM_MEMORY_PROFILER_ESTIMATE_CUDAGRAPHS=0 \ + -e VLLM_NO_USAGE_STATS=1 \ -e VLLM_SSM_CONV_STATE_LAYOUT=DS \ + -e VLLM_USE_FLASHINFER_SAMPLER=1 \ -e VLLM_USE_FUSED_MOE_GROUPED_TOPK=1 \ + -e VLLM_WORKER_MULTIPROC_METHOD=spawn \ -v /mnt/ssd/vLLM/Models:/root/.cache/huggingface \ -v /mnt/ssd/vLLM/Cache/torch_compile:/root/.cache/vllm/torch_compile_cache \ -v /mnt/ssd/vLLM/Cache/triton:/root/.triton/cache \ -v /mnt/ssd/vLLM/Patches/genesis/vllm/_genesis:/usr/local/lib/python3.12/dist-packages/vllm/_genesis:ro \ - -v /mnt/ssd/vLLM/Patches/patch_tolist_cudagraph.py:/patches/patch_tolist_cudagraph.py:ro \ -v /mnt/ssd/vLLM/Patches/patch_timings_07351e088.py:/patches/patch_timings_07351e088.py:ro \ -p ''${PORT}:8000 \ --entrypoint /bin/bash \ - vllm/vllm-openai:nightly-7a1eb8ac2ec4ea69338c51dc7afd4b15010abfa8 \ + vllm/vllm-openai:nightly-01d4d1ad375dc5854779c593eee093bcebb0cada \ -c "${vllmCmdFlat}" ''; cmdStop = "${pkgs.docker}/bin/docker stop \${MODEL_ID}"; @@ -384,7 +381,6 @@ in vllmCmd = '' set -e; pip install xxhash pandas scipy -q; python3 -m vllm._genesis.patches.apply_all; - python3 /patches/patch_tolist_cudagraph.py; python3 /patches/patch_timings_07351e088.py; exec vllm serve --served-model-name ''${MODEL_ID} @@ -414,45 +410,45 @@ in ${pkgs.docker}/bin/docker run --rm --device=nvidia.com/gpu=all \ --name ''${MODEL_ID} \ --ipc=host \ - -e VLLM_WORKER_MULTIPROC_METHOD=spawn \ - 
-e NCCL_CUMEM_ENABLE=0 \ - -e NCCL_P2P_DISABLE=1 \ - -e VLLM_MEMORY_PROFILER_ESTIMATE_CUDAGRAPHS=0 \ - -e VLLM_NO_USAGE_STATS=1 \ - -e PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True,max_split_size_mb:512 \ - -e VLLM_FLOAT32_MATMUL_PRECISION=high \ - -e VLLM_USE_FLASHINFER_SAMPLER=1 \ - -e OMP_NUM_THREADS=1 \ -e CUDA_DEVICE_MAX_CONNECTIONS=8 \ - -e CUDA_VISIBLE_DEVICES=0 \ -e CUDA_DEVICE_ORDER=PCI_BUS_ID \ - -e VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 \ - -e VLLM_MARLIN_USE_ATOMIC_ADD=1 \ - -e TRITON_CACHE_DIR=/root/.triton/cache \ + -e CUDA_VISIBLE_DEVICES=0 \ -e GENESIS_ENABLE_P58_ASYNC_PLACEHOLDER_FIX=1 \ -e GENESIS_ENABLE_P64_QWEN3CODER_MTP_STREAMING=1 \ -e GENESIS_ENABLE_P66_CUDAGRAPH_SIZE_FILTER=1 \ -e GENESIS_ENABLE_P68_AUTO_FORCE_TOOL=1 \ -e GENESIS_ENABLE_P69_LONG_CTX_TOOL_REMINDER=1 \ - -e GENESIS_P68_P69_LONG_CTX_THRESHOLD_CHARS=50000 \ -e GENESIS_ENABLE_P72_PROFILE_RUN_CAP=1 \ - -e GENESIS_PROFILE_RUN_CAP_M=4128 \ -e GENESIS_ENABLE_P74_CHUNK_CLAMP=1 \ -e GENESIS_ENABLE_P94=1 \ - -e GENESIS_ENABLE_PN8_MTP_DRAFT_ONLINE_QUANT=1 \ -e GENESIS_ENABLE_PN13_CUDA_GRAPH_LAMBDA_ARITY=1 \ -e GENESIS_ENABLE_PN14_TQ_DECODE_OOB_CLAMP=1 \ -e GENESIS_ENABLE_PN17_FA2_LSE_CLAMP=1 \ -e GENESIS_ENABLE_PN19_SCOPED_MAX_SPLIT=1 \ + -e GENESIS_ENABLE_PN59_STREAMING_GDN=1 \ + -e GENESIS_ENABLE_PN8_MTP_DRAFT_ONLINE_QUANT=1 \ + -e GENESIS_P68_P69_LONG_CTX_THRESHOLD_CHARS=50000 \ + -e GENESIS_PROFILE_RUN_CAP_M=4128 \ + -e NCCL_CUMEM_ENABLE=0 \ + -e NCCL_P2P_DISABLE=1 \ + -e OMP_NUM_THREADS=1 \ + -e PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True,max_split_size_mb:512 \ + -e TRITON_CACHE_DIR=/root/.triton/cache \ + -e VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 \ + -e VLLM_FLOAT32_MATMUL_PRECISION=high \ + -e VLLM_MARLIN_USE_ATOMIC_ADD=1 \ + -e VLLM_MEMORY_PROFILER_ESTIMATE_CUDAGRAPHS=0 \ + -e VLLM_NO_USAGE_STATS=1 \ + -e VLLM_USE_FLASHINFER_SAMPLER=1 \ + -e VLLM_WORKER_MULTIPROC_METHOD=spawn \ -v /mnt/ssd/vLLM/Models:/root/.cache/huggingface \ -v 
/mnt/ssd/vLLM/Cache/torch_compile:/root/.cache/vllm/torch_compile_cache \ -v /mnt/ssd/vLLM/Cache/triton:/root/.triton/cache \ -v /mnt/ssd/vLLM/Patches/genesis/vllm/_genesis:/usr/local/lib/python3.12/dist-packages/vllm/_genesis:ro \ - -v /mnt/ssd/vLLM/Patches/patch_tolist_cudagraph.py:/patches/patch_tolist_cudagraph.py:ro \ -v /mnt/ssd/vLLM/Patches/patch_timings_07351e088.py:/patches/patch_timings_07351e088.py:ro \ -p ''${PORT}:8000 \ --entrypoint /bin/bash \ - vllm/vllm-openai:nightly-7a1eb8ac2ec4ea69338c51dc7afd4b15010abfa8 \ + vllm/vllm-openai:nightly-01d4d1ad375dc5854779c593eee093bcebb0cada \ -c "${vllmCmdFlat}" ''; cmdStop = "${pkgs.docker}/bin/docker stop \${MODEL_ID}"; diff --git a/modules/nixos/services/llama-swap/setup-qwen36-vllm.sh b/modules/nixos/services/llama-swap/setup-qwen36-vllm.sh index 2f8a324..67f4687 100755 --- a/modules/nixos/services/llama-swap/setup-qwen36-vllm.sh +++ b/modules/nixos/services/llama-swap/setup-qwen36-vllm.sh @@ -5,33 +5,28 @@ # source patches to the Genesis tree, and fetches all boot-time sidecar # patches into place under /mnt/ssd/vLLM/. # -# Idempotent — safe to re-run; skips steps already completed. +# Idempotent - safe to re-run; skips steps already completed. 
# # Prerequisites: git (with git-lfs), docker set -euo pipefail +# Model Directories MODEL_DIR="/mnt/ssd/vLLM/Models" MODEL_SUBDIR="qwen3.6-27b-autoround-int4" PATCHES_DIR="/mnt/ssd/vLLM/Patches" CACHE_DIR="/mnt/ssd/vLLM/Cache" GENESIS_DIR="${PATCHES_DIR}/genesis" +GENESIS_PIN="${GENESIS_PIN:-7b9fd319}" -# Pin Genesis to the validated commit (bump requires re-testing all composes) -GENESIS_PIN="${GENESIS_PIN:-2db18df}" - -TOLIST_PATCH="${PATCHES_DIR}/patch_tolist_cudagraph.py" +# 3090 Patches +BASE_3090_PATCH_URL="https://raw.githubusercontent.com/noonghunna/club-3090/v7.69-cliff2-test/models/qwen3.6-27b/vllm/patches" INPUTS_EMBEDS_PATCH="${PATCHES_DIR}/patch_inputs_embeds_optional.py" -WORKSPACE_LOCK_PATCH="${PATCHES_DIR}/patch_workspace_lock_disable.py" -PN25_REGISTER_PATCH="${PATCHES_DIR}/patch_pn25_genesis_register_fix.py" -PN30_DST_PATCH="${PATCHES_DIR}/patch_pn30_dst_shaped_temp_fix.py" -PR40798_PATCH="${PATCHES_DIR}/patch_pr40798_workspace.py" + +# Timings Patch TIMINGS_PATCH="${PATCHES_DIR}/patch_timings_07351e088.py" TIMINGS_PATCH_URL="${TIMINGS_PATCH_URL:-https://gitea.va.reichard.io/evan/nix/raw/branch/master/modules/nixos/services/llama-swap/patches/patch_timings_07351e088.py}" -# Base URL for sidecar patches (club-3090 repo, v7.69-cliff2-test branch) -PATCH_BASE_URL="https://raw.githubusercontent.com/noonghunna/club-3090/v7.69-cliff2-test/models/qwen3.6-27b/vllm/patches" - # ---------- Preflight Checks ---------- for cmd in git git-lfs curl; do if ! command -v "$cmd" &>/dev/null; then @@ -56,7 +51,7 @@ fi # ---------- Clone / Pin Genesis Patches ---------- if [ -d "${GENESIS_DIR}/.git" ]; then - echo "Genesis already cloned — fetching + checking out ${GENESIS_PIN} ..." + echo "Genesis already cloned - fetching + checking out ${GENESIS_PIN} ..." (cd "${GENESIS_DIR}" && git fetch origin && git checkout "${GENESIS_PIN}" 2>&1 | tail -3) else echo "Cloning Genesis patches at ${GENESIS_PIN} ..." 
@@ -64,7 +59,7 @@ else (cd "${GENESIS_DIR}" && git checkout "${GENESIS_PIN}") fi -# Sanity Check — v7.14+ layout +# Sanity Check if [[ ! -d "${GENESIS_DIR}/vllm/_genesis" ]]; then echo "ERROR: genesis tree at ${GENESIS_PIN} missing vllm/_genesis package." >&2 echo " Re-run with GENESIS_PIN= to try a different version." >&2 @@ -73,7 +68,6 @@ fi echo "Genesis pinned to ${GENESIS_PIN} ($(cd "${GENESIS_DIR}" && git rev-parse --short HEAD))" # ---------- Download Sidecar Patches ---------- -# Fetched from club-3090 repo so this script is self-contained. download_patch() { local dest="$1" local filename @@ -82,17 +76,12 @@ download_patch() { echo "Patch ${filename} already present, skipping." else echo "Downloading ${filename}..." - curl -fsSL "${PATCH_BASE_URL}/${filename}" -o "${dest}" + curl -fsSL "${BASE_3090_PATCH_URL}/${filename}" -o "${dest}" echo "Patch ${filename} written." fi } -download_patch "${TOLIST_PATCH}" download_patch "${INPUTS_EMBEDS_PATCH}" -download_patch "${WORKSPACE_LOCK_PATCH}" -download_patch "${PN25_REGISTER_PATCH}" -download_patch "${PN30_DST_PATCH}" -download_patch "${PR40798_PATCH}" # ---------- Download Timing Patch ---------- tmp_timings_patch="$(mktemp)" @@ -113,8 +102,8 @@ fi echo "" echo "=== Setup Complete ===" echo " Model: ${MODEL_DIR}/${MODEL_SUBDIR}" -echo " Genesis: ${GENESIS_DIR} (pinned: ${GENESIS_PIN})" echo " Cache: ${CACHE_DIR}/{torch_compile,triton}" +echo " Genesis: ${GENESIS_DIR} (pinned: ${GENESIS_PIN})" echo "" echo "Expected layout:" echo " /mnt/ssd/vLLM/" @@ -124,12 +113,7 @@ echo " ├── Cache/" echo " │ ├── torch_compile/ (torch.compile cache)" echo " │ └── triton/ (Triton kernel cache)" echo " └── Patches/" -echo " ├── genesis/ (Genesis v7.69 @ ${GENESIS_PIN})" -echo " │ └── vllm/_genesis/ (mounted into container; PN25+PN30+PN34 native)" -echo " ├── patch_tolist_cudagraph.py (boot-time: cudagraph capture fix)" +echo " ├── genesis/ (Genesis @ ${GENESIS_PIN})" +echo " │ └── vllm/_genesis/ (mounted into container)" echo " 
├── patch_inputs_embeds_optional.py (boot-time: vllm#35975 backport, text-only models)" -echo " ├── patch_workspace_lock_disable.py (rollback: superseded by PN34 in v7.69)" -echo " ├── patch_pn25_genesis_register_fix.py (rollback: folded into v7.69 natively)" -echo " ├── patch_pn30_dst_shaped_temp_fix.py (rollback: folded into v7.69 natively)" -echo " ├── patch_pr40798_workspace.py (PR40798 workspace fix)" echo " └── patch_timings_07351e088.py (boot-time: llama.cpp-compatible timings)"