chore(llm): clean up models & fix llama-cpp issue

2025-12-10 12:12:50 -05:00
parent 30934c8f7c
commit c1a650a90e
12 changed files with 501 additions and 135 deletions


@@ -1,9 +1,10 @@
local llm_endpoint = "https://llm-api.va.reichard.io"
local llm_model = "qwen3-coder-30b-instruct"
+local llm_assistant_model = "gpt-oss-20b-thinking"
+local llm_infill_model = "qwen2.5-coder-3b-instruct"
-- Default Llama - Toggle Llama & Copilot
-vim.g.copilot_filetypes = { ["*"] = false }
-local current_mode = "llama"
+-- vim.g.copilot_filetypes = { ["*"] = false }
+local current_mode = "copilot"
local function toggle_llm_fim_provider()
if current_mode == "llama" then
vim.g.copilot_filetypes = { ["*"] = true }
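The hunk cuts off inside toggle_llm_fim_provider, so only the first branch is visible. A minimal sketch of how the full toggle plausibly reads, assuming the else branch simply mirrors the llama branch; the vim.notify feedback is an assumption, and the real function may also enable or disable llama.vim itself, which this hunk does not show:

-- Sketch only: everything past the first branch is assumed, not taken from the diff.
local function toggle_llm_fim_provider()
	if current_mode == "llama" then
		vim.g.copilot_filetypes = { ["*"] = true }   -- hand FIM completions to Copilot
		current_mode = "copilot"
		vim.notify("FIM provider: copilot")          -- assumed feedback call
	else
		vim.g.copilot_filetypes = { ["*"] = false }  -- Copilot off, back to llama.vim
		current_mode = "llama"
		vim.notify("FIM provider: llama")            -- assumed feedback call
	end
end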
@@ -24,8 +25,10 @@ vim.keymap.set("n", "<leader>cf", toggle_llm_fim_provider, { desc = "Toggle FIM
-- Configure LLama LLM FIM
vim.g.llama_config = {
endpoint = llm_endpoint .. "/infill",
-model = llm_model,
-n_predict = 1024,
+model = llm_infill_model,
+n_predict = 2048,
+ring_n_chunks = 32,
+enable_at_startup = false,
}
-- Configure Code Companion
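Putting the hunk above back together, the FIM config after this commit reads roughly as follows; the comments are annotations, not part of the diff, and any fields outside the hunk are omitted. Pointing infill at the dedicated 3B model and adding enable_at_startup = false is presumably the "fix llama-cpp issue" half of the commit title.

-- Reconstructed from the added lines above; comments are assumptions.
vim.g.llama_config = {
	endpoint = llm_endpoint .. "/infill",  -- llama.cpp-style /infill route
	model = llm_infill_model,              -- qwen2.5-coder-3b-instruct, small dedicated FIM model
	n_predict = 2048,                      -- max tokens generated per completion
	ring_n_chunks = 32,                    -- presumably the llama.vim extra-context ring buffer size
	enable_at_startup = false,             -- stay off until toggled on manually
}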
@@ -39,7 +42,7 @@ require("codecompanion").setup({
return require("codecompanion.adapters").extend("openai_compatible", {
name = "llama-swap",
formatted_name = "LlamaSwap",
-schema = { model = { default = llm_model } },
+schema = { model = { default = llm_assistant_model } },
env = { url = llm_endpoint },
})
end,
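The CodeCompanion hunk only touches the adapter's default model. For context, a typical codecompanion.nvim setup wires such an adapter into its strategies roughly as below; the strategies block and the exact adapter key are assumptions, not part of this diff, and the layout can differ between plugin versions.

-- Assumed surrounding setup; only the schema/env lines inside the adapter come from the diff.
require("codecompanion").setup({
	adapters = {
		["llama-swap"] = function()
			return require("codecompanion.adapters").extend("openai_compatible", {
				name = "llama-swap",
				formatted_name = "LlamaSwap",
				schema = { model = { default = llm_assistant_model } },  -- gpt-oss-20b-thinking
				env = { url = llm_endpoint },
			})
		end,
	},
	strategies = {
		chat = { adapter = "llama-swap" },    -- assumed: chat buffer uses the llama-swap adapter
		inline = { adapter = "llama-swap" },  -- assumed: inline edits too
	},
})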