feat(vllm): add Qwen3.5/3.6 27B single-GPU serving configs
@@ -358,60 +358,21 @@ in
};
};

"vllm-qwen3.5-27b-thinking" = {
name = "vLLM Qwen3.5 (27B) - Thinking";
macros.ctx = "196608";
proxy = "http://127.0.0.1:\${PORT}";
cmd = ''
${pkgs.docker}/bin/docker run --rm --device=nvidia.com/gpu=all \
--name ''${MODEL_ID} \
-e PYTORCH_ALLOC_CONF=expandable_segments:True \
-v /mnt/ssd/vLLM:/root/.cache/huggingface \
-p ''${PORT}:8000 \
--ipc=host vllm/vllm-openai:latest \
--served-model-name ''${MODEL_ID} \
--model cyankiwi/Qwen3.5-27B-AWQ-4bit \
--max-model-len 24576 \
--kv-cache-dtype auto \
--max-num-seqs 4 \
--max-num-batched-tokens 4096 \
--enable-chunked-prefill \
--gpu-memory-utilization 0.95 \
--language-model-only \
--speculative-config '{"method":"mtp","num_speculative_tokens":3}' \
--enable-prefix-caching \
--enforce-eager \
--block-size 32 \
--swap-space 4 \
--tensor-parallel-size 1 \
--reasoning-parser qwen3 \
--enable-auto-tool-choice \
--default-chat-template-kwargs '{"enable_thinking": true}' \
--tool-call-parser qwen3_coder
'';
cmdStop = "docker stop \${MODEL_ID}";

metadata = {
type = [
"text-generation"
"coding"
];
};
};
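vLLM's OpenAI-compatible server answers GET /v1/models once the engine has loaded, which makes a cheap readiness probe possible against the proxied port. A minimal sketch in the same config idiom; the cmdHealth key is a hypothetical addition to the entry above, not a documented key of this wrapper:

# Hypothetical readiness probe (key name is an assumption, not part of the
# wrapper's schema); /v1/models lists the served models once vLLM is up.
cmdHealth = "curl -fsS http://127.0.0.1:\${PORT}/v1/models";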

# https://github.com/Lorbus/qwen36-27b-single-3090
# Model: Lorbus/Qwen3.6-27B-int4-AutoRound (auto_round int4)
# Genesis v7.14+ patches for MTP streaming + tool adherence
# Text-only (no vision) to maximize KV budget for ~100k context
"vllm-qwen3.6-27b-thinking" = {
name = "vLLM Qwen3.6 (27B) - Thinking";
macros.ctx = "75000";
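The "maximize KV budget" note above is the constraint behind these single-GPU variants: at fp16, the KV cache alone for ~100k tokens would crowd out the int4 weights on a 24 GB card, which is why the entries below step the cache down to fp8 and then a 3-bit dtype. A back-of-envelope sketch in Nix; the layer and head dimensions are illustrative assumptions, not confirmed Qwen3.6-27B shapes:

let
  # Assumed model shape, for illustration only (not the real Qwen3.6-27B dims)
  layers = 48; kvHeads = 8; headDim = 128;
  ctx = 100000;
  GiB = 1024 * 1024 * 1024;
  # K and V per layer per token: 2 * kvHeads * headDim elements
  kvTotal = bytesPerElem: 2 * layers * kvHeads * headDim * bytesPerElem * ctx;
in {
  fp16GiB = kvTotal 2 / GiB; # ~18 GiB: cannot sit beside ~14 GiB of int4 weights
  fp8GiB = kvTotal 1 / GiB;  # ~9 GiB: fits, barely
  # a 3-bit cache cuts this by a further ~2.7x, which is what buys 185K context
}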

# Long-text variant - 185K context, text-only (no vision)
# TurboQuant 3-bit KV + MTP n=3 + PN12/P104 cliff-closure stack
"vllm-qwen3.6-27b-long-text" = {
name = "vLLM Qwen3.6 (27B) - Long Text";
macros.ctx = "185000";
proxy = "http://127.0.0.1:\${PORT}";
cmd =
let
vllmCmd = ''
set -e; pip install xxhash pandas scipy -q;
python3 -m vllm._genesis.patches.apply_all;
python3 /patches/patch_pn12_ffn_pool_anchor.py;
python3 /patches/patch_pn12_compile_safe_custom_op.py;
python3 /patches/patch_fa_max_seqlen_clamp.py;
python3 /patches/patch_tolist_cudagraph.py;
exec vllm serve
--served-model-name ''${MODEL_ID}
@@ -420,10 +381,10 @@ in
--dtype float16
--tensor-parallel-size 1
--max-model-len ''${ctx}
- --gpu-memory-utilization 0.97
+ --gpu-memory-utilization 0.975
--max-num-seqs 1
- --max-num-batched-tokens 2048
- --kv-cache-dtype fp8_e5m2
+ --max-num-batched-tokens 4128
+ --kv-cache-dtype turboquant_3bit_nc
--language-model-only
--trust-remote-code
--reasoning-parser qwen3
@@ -431,6 +392,7 @@ in
--tool-call-parser qwen3_coder
--enable-prefix-caching
--enable-chunked-prefill
--no-scheduler-reserve-full-isl
--speculative-config '{\"method\":\"mtp\",\"num_speculative_tokens\":3}'
--host 0.0.0.0
--port 8000
@@ -455,12 +417,110 @@ in
-e CUDA_DEVICE_ORDER=PCI_BUS_ID \
-e VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 \
-e VLLM_MARLIN_USE_ATOMIC_ADD=1 \
-e GENESIS_ENABLE_P65_TURBOQUANT_SPEC_CG_DOWNGRADE=1 \
-e GENESIS_ENABLE_P66_CUDAGRAPH_SIZE_FILTER=1 \
-e GENESIS_ENABLE_P64_QWEN3CODER_MTP_STREAMING=1 \
-e GENESIS_ENABLE_P68_AUTO_FORCE_TOOL=1 \
-e GENESIS_ENABLE_P69_LONG_CTX_TOOL_REMINDER=1 \
-e GENESIS_ENABLE_P101=1 \
-e GENESIS_ENABLE_P103=1 \
-e GENESIS_ENABLE_PN12_FFN_INTERMEDIATE_POOL=1 \
-e GENESIS_ENABLE_PN13_CUDA_GRAPH_LAMBDA_ARITY=1 \
-e GENESIS_ENABLE_FA_MAX_SEQLEN_CLAMP=1 \
-e GENESIS_ENABLE_PN17_FA2_LSE_CLAMP=1 \
-e GENESIS_ENABLE_P37=1 \
-v /mnt/ssd/vLLM/Models:/root/.cache/huggingface \
-v /mnt/ssd/vLLM/Patches/genesis/vllm/_genesis:/usr/local/lib/python3.12/dist-packages/vllm/_genesis:ro \
-v /mnt/ssd/vLLM/Patches/patch_tolist_cudagraph.py:/patches/patch_tolist_cudagraph.py:ro \
-v /mnt/ssd/vLLM/Patches/patch_pn12_ffn_pool_anchor.py:/patches/patch_pn12_ffn_pool_anchor.py:ro \
-v /mnt/ssd/vLLM/Patches/patch_pn12_compile_safe_custom_op.py:/patches/patch_pn12_compile_safe_custom_op.py:ro \
-v /mnt/ssd/vLLM/Patches/patch_fa_max_seqlen_clamp.py:/patches/patch_fa_max_seqlen_clamp.py:ro \
-p ''${PORT}:8000 \
--entrypoint /bin/bash \
vllm/vllm-openai:nightly-07351e0883470724dd5a7e9730ed10e01fc99d08 \
-c "${vllmCmdFlat}"
'';
cmdStop = "docker stop \${MODEL_ID}";

metadata = {
type = [
"text-generation"
"coding"
];
};
};
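The long-text hunk above uses vllmCmdFlat without showing its definition (it sits in unchanged context between hunks); the long-vision entry below spells it out as builtins.replaceStrings [ "\n" ] [ " " ] vllmCmd. Flattening turns the multi-line script into a single line that the container's bash can take as one -c argument, and it is also why the serve flags inside vllmCmd carry no trailing backslashes, unlike the docker arguments around them. The idiom in isolation, as a small sketch:

let
  script = ''
    exec vllm serve
    --port 8000
  '';
in builtins.replaceStrings [ "\n" ] [ " " ] script
# evaluates to "exec vllm serve --port 8000 " (the final newline becomes a space too)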

# https://github.com/Lorbus/qwen36-27b-single-3090
# Long-vision variant - 140K context with vision tower active
# TurboQuant 3-bit KV + MTP n=3 + PN12/P104 cliff-closure stack
"vllm-qwen3.6-27b-long-vision" = {
name = "vLLM Qwen3.6 (27B) - Long Vision";
macros.ctx = "140000";
proxy = "http://127.0.0.1:\${PORT}";
cmd =
let
vllmCmd = ''
set -e; pip install xxhash pandas scipy -q;
python3 -m vllm._genesis.patches.apply_all;
python3 /patches/patch_pn12_ffn_pool_anchor.py;
python3 /patches/patch_pn12_compile_safe_custom_op.py;
python3 /patches/patch_fa_max_seqlen_clamp.py;
python3 /patches/patch_tolist_cudagraph.py;
exec vllm serve
--served-model-name ''${MODEL_ID}
--model /root/.cache/huggingface/qwen3.6-27b-autoround-int4
--quantization auto_round
--dtype float16
--tensor-parallel-size 1
--max-model-len ''${ctx}
--gpu-memory-utilization 0.95
--max-num-seqs 1
--max-num-batched-tokens 4128
--kv-cache-dtype turboquant_3bit_nc
--trust-remote-code
--reasoning-parser qwen3
--enable-auto-tool-choice
--tool-call-parser qwen3_coder
--enable-prefix-caching
--enable-chunked-prefill
--no-scheduler-reserve-full-isl
--speculative-config '{\"method\":\"mtp\",\"num_speculative_tokens\":3}'
--host 0.0.0.0
--port 8000
'';
vllmCmdFlat = builtins.replaceStrings [ "\n" ] [ " " ] vllmCmd;
in
''
${pkgs.docker}/bin/docker run --rm --device=nvidia.com/gpu=all \
--name ''${MODEL_ID} \
--ipc=host \
-e VLLM_WORKER_MULTIPROC_METHOD=spawn \
-e NCCL_CUMEM_ENABLE=0 \
-e NCCL_P2P_DISABLE=1 \
-e VLLM_MEMORY_PROFILER_ESTIMATE_CUDAGRAPHS=1 \
-e VLLM_NO_USAGE_STATS=1 \
-e PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True,max_split_size_mb:512 \
-e VLLM_FLOAT32_MATMUL_PRECISION=high \
-e VLLM_USE_FLASHINFER_SAMPLER=1 \
-e OMP_NUM_THREADS=1 \
-e CUDA_DEVICE_MAX_CONNECTIONS=8 \
-e CUDA_VISIBLE_DEVICES=0 \
-e CUDA_DEVICE_ORDER=PCI_BUS_ID \
-e VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 \
-e VLLM_MARLIN_USE_ATOMIC_ADD=1 \
-e GENESIS_ENABLE_P65_TURBOQUANT_SPEC_CG_DOWNGRADE=1 \
-e GENESIS_ENABLE_P66_CUDAGRAPH_SIZE_FILTER=1 \
-e GENESIS_ENABLE_P64_QWEN3CODER_MTP_STREAMING=1 \
-e GENESIS_ENABLE_P101=1 \
-e GENESIS_ENABLE_P103=1 \
-e GENESIS_ENABLE_PN12_FFN_INTERMEDIATE_POOL=1 \
-e GENESIS_ENABLE_PN13_CUDA_GRAPH_LAMBDA_ARITY=1 \
-e GENESIS_ENABLE_FA_MAX_SEQLEN_CLAMP=1 \
-e GENESIS_ENABLE_PN17_FA2_LSE_CLAMP=1 \
-v /mnt/ssd/vLLM/Models:/root/.cache/huggingface \
-v /mnt/ssd/vLLM/Patches/genesis/vllm/_genesis:/usr/local/lib/python3.12/dist-packages/vllm/_genesis:ro \
-v /mnt/ssd/vLLM/Patches/patch_tolist_cudagraph.py:/patches/patch_tolist_cudagraph.py:ro \
-v /mnt/ssd/vLLM/Patches/patch_pn12_ffn_pool_anchor.py:/patches/patch_pn12_ffn_pool_anchor.py:ro \
-v /mnt/ssd/vLLM/Patches/patch_pn12_compile_safe_custom_op.py:/patches/patch_pn12_compile_safe_custom_op.py:ro \
-v /mnt/ssd/vLLM/Patches/patch_fa_max_seqlen_clamp.py:/patches/patch_fa_max_seqlen_clamp.py:ro \
-p ''${PORT}:8000 \
--entrypoint /bin/bash \
vllm/vllm-openai:nightly-07351e0883470724dd5a7e9730ed10e01fc99d08 \
-c "${vllmCmdFlat}"
'';
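One subtlety worth flagging: the direct docker invocation in the Qwen3.5 entry writes the speculative config as '{"method":"mtp",...}', while the flattened variants write '{\"method\":\"mtp\",...}'. The extra backslashes are consumed by the shell that expands the double-quoted -c "${vllmCmdFlat}" argument, so the container's bash still receives clean single-quoted JSON. A small runnable Nix sketch of the two layers; the attribute names are mine:

let
  # As written inside vllmCmd (indented strings keep backslashes verbatim):
  asWritten = ''--speculative-config '{\"method\":\"mtp\"}' '';
in {
  inherit asWritten;
  # After the outer shell processes the double-quoted -c "...", each \" -> ",
  # so bash -c inside the container parses single-quoted JSON:
  afterOuterShell = ''--speculative-config '{"method":"mtp"}' '';
}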