refactor(terminal): filter models by coding type

Change opencode and pi model filtering to use the 'coding' type instead
of the more generic 'text-generation' type. Update the llama-swap model
configs to include 'coding' in the metadata type list for the relevant
models (deepseek-coder, qwen-coder, mistral, codellama,
llama3-8b-instruct-q5).
commit ec15ebb262
parent 234c4f2b8b
date:  2026-02-06 08:33:01 -05:00

3 changed files with 30 additions and 16 deletions

@@ -29,7 +29,10 @@ in
       -dev CUDA0
     '';
     metadata = {
-      type = [ "text-generation" ];
+      type = [
+        "text-generation"
+        "coding"
+      ];
     };
     env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
   };
@@ -56,7 +59,10 @@ in
       -dev CUDA0
     '';
     metadata = {
-      type = [ "text-generation" ];
+      type = [
+        "text-generation"
+        "coding"
+      ];
     };
     env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
   };
@@ -142,7 +148,10 @@ in
       -fit off
     '';
     metadata = {
-      type = [ "text-generation" ];
+      type = [
+        "text-generation"
+        "coding"
+      ];
    };
     env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
   };
@@ -165,7 +174,10 @@ in
       -fit off
     '';
     metadata = {
-      type = [ "text-generation" ];
+      type = [
+        "text-generation"
+        "coding"
+      ];
     };
     env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
   };
@@ -216,7 +228,10 @@ in
       -fit off
     '';
     metadata = {
-      type = [ "text-generation" ];
+      type = [
+        "text-generation"
+        "coding"
+      ];
     };
     env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
   };
@@ -261,7 +276,10 @@ in
       -fit off
     '';
     metadata = {
-      type = [ "text-generation" ];
+      type = [
+        "text-generation"
+        "coding"
+      ];
     };
     env = [ "GGML_CUDA_ENABLE_UNIFIED_MEMORY=1" ];
   };
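
Only the llama-swap side of the change is shown above; the other two changed
files are the opencode and pi configs, which consume the new tag. As a rough
sketch of what such a consumer-side filter can look like in Nix, assuming the
models are available as an attrset shaped like the entries patched above
(`models` and `codingModels` are placeholder names, not bindings from this
repository):

# Sketch only: keep the models whose metadata.type list contains "coding".
# lib.filterAttrs and builtins.elem are standard nixpkgs/Nix functions; the
# metadata.type shape mirrors the diff above, everything else is assumed.
codingModels = lib.filterAttrs
  (_name: model: builtins.elem "coding" (model.metadata.type or [ ]))
  models;

Filtering on 'coding' instead of 'text-generation' keeps general-purpose chat
models out of the coding tools' model lists while leaving their llama-swap
entries untouched.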