-- Configure the llama.vim LLM completion plugin.
-- Settings are collected in a local table first, then published via vim.g.
local llama_opts = {
  -- llama.cpp server fill-in-the-middle endpoint.
  endpoint = "http://10.0.20.100:8080/infill",

  -- model = "qwen2.5-coder-7b-instruct",  -- previous model, kept for reference
  model = "qwen3-coder-30b-instruct",

  -- Upper bound on generated tokens per completion (plugin option; see llama.vim docs).
  n_predict = 1024,

  -- Options below are intentionally left at plugin defaults; uncomment to override.
  -- api_key = "",
  -- n_prefix = 256,
  -- n_suffix = 64,
  -- t_max_prompt_ms = 500,
  -- t_max_predict_ms = 500,
  -- show_info = 2,
  -- auto_fim = true,
  -- max_line_suffix = 8,
  -- max_cache_keys = 256,
  -- ring_n_chunks = 8,
  -- ring_chunk_size = 32,
  -- ring_scope = 512,
  -- ring_update_ms = 1000,
}

vim.g.llama_config = llama_opts