feat: add llm tools

This commit is contained in:
David Ibia
2025-07-21 17:03:25 +01:00
parent f0093997bf
commit 13ddbe2f56
3 changed files with 181 additions and 0 deletions


@@ -0,0 +1,40 @@
-- CodeCompanion: OpenAI adapter for the chat strategy, plus the MCP Hub extension.
local codecompanion = require("codecompanion")

codecompanion.setup({
    strategies = {
        chat = {
            adapter = "openai",
        },
    },
    extensions = {
        mcphub = {
            callback = "mcphub.extensions.codecompanion",
            opts = {
                make_vars = true, -- expose MCP resources as #variables in chat
                make_slash_commands = true, -- expose MCP prompts as /slash commands
                show_result_in_chat = true, -- show tool results directly in the chat buffer
            },
        },
    },
})
local opts = { noremap = true, silent = true }

-- <leader>ac: open a CodeCompanion chat buffer.
vim.keymap.set("n", "<leader>ac", function()
    vim.cmd("CodeCompanionChat")
end, opts)

-- <leader>ai: prompt for an instruction and run it as an inline request
-- (acts on the visual selection when invoked from visual mode).
vim.keymap.set({ "n", "v" }, "<leader>ai", function()
    local input = vim.fn.input("[Code Companion Inline]: ")
    local trimmed_input = vim.fn.trim(input)
    if trimmed_input == "" then
        vim.notify("No input provided for inline code companion", vim.log.levels.WARN)
        return
    end
    vim.cmd("CodeCompanion " .. vim.fn.shellescape(trimmed_input))
end, opts)
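-- For illustration (not part of this commit): entering `refactor this function`
-- at the prompt above executes `:CodeCompanion 'refactor this function'`;
-- vim.fn.shellescape() adds the single quotes, which arrive as part of the
-- prompt text.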


@@ -0,0 +1,36 @@
local gen = require("gen")

gen.setup({
    -- Custom Parameters (with defaults)
    model = "deepseek-r1:7b", -- The default model to use.
    quit_map = "q", -- set keymap to close the response window
    retry_map = "<c-r>", -- set keymap to re-send the current prompt
    accept_map = "<c-cr>", -- set keymap to replace the previous selection with the last result
    host = "localhost", -- The host running the Ollama service.
    port = "11434", -- The port on which the Ollama service is listening.
    display_mode = "split", -- The display mode. Can be "float" or "split" or "horizontal-split".
    show_prompt = false, -- Shows the prompt submitted to Ollama. Can be true (3 lines) or "full".
    show_model = false, -- Displays which model you are using at the beginning of your chat session.
    no_auto_close = false, -- Never closes the window automatically.
    file = false, -- Write the payload to a temporary file to keep the command short.
    hidden = false, -- Hide the generation window (if true, will implicitly set `prompt.replace = true`), requires Neovim >= 0.10
    -- Function to initialize Ollama.
    init = function(options)
        pcall(io.popen, "ollama serve > /dev/null 2>&1 &")
    end,
    -- The command for the Ollama service. You can use placeholders $prompt, $model and $body (shellescaped).
    -- This can also be a command string.
    -- The executed command must return a JSON object with { response, context }
    -- (context property is optional).
    command = function(options)
        local body = { model = options.model, stream = true }
        return "curl --silent --no-buffer -X POST http://"
            .. options.host
            .. ":"
            .. options.port
            .. "/api/chat -d $body"
    end,
    -- list_models = '<omitted lua function>', -- Retrieves a list of model names
    result_filetype = "markdown", -- Configure filetype of the result buffer
    debug = false, -- Prints errors and the command which is run.
})
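-- A minimal sketch (an assumption, not part of this commit): the same request
-- written as a plain command string, using gen.nvim's shell-escaped $body
-- placeholder instead of building the URL in a function:
--
--   command = "curl --silent --no-buffer -X POST http://localhost:11434/api/chat -d $body",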

lua/absolute/after/llm.lua Normal file

@@ -0,0 +1,105 @@
local llm = require("llm")
local llmCompletion = require("llm.completion")

llm.setup({
    api_token = nil, -- cf Install paragraph
    model = "codellama:13b", -- the model ID, behavior depends on backend
    backend = "ollama", -- backend ID, "huggingface" | "ollama" | "openai" | "tgi"
    url = "http://localhost:11434", -- the http url of the backend
    -- tokens_to_clear = { "<|endoftext|>" }, -- tokens to remove from the model's output
    tokens_to_clear = { "<EOT>" }, -- CodeLlama's end-of-text token
    -- parameters that are added to the request body; values are arbitrary, you can set
    -- any field:value pair here and it will be passed as-is to the backend
    request_body = {
        parameters = {
            -- max_new_tokens = 60,
            temperature = 0.2,
            top_p = 0.95,
        },
    },
    -- set this if the model supports fill in the middle
    -- fim = {
    --     enabled = false,
    --     prefix = "<fim_prefix>",
    --     middle = "<fim_middle>",
    --     suffix = "<fim_suffix>",
    -- },
    fim = { -- CodeLlama's infill tokens
        enabled = true,
        prefix = "<PRE> ",
        middle = " <MID>",
        suffix = " <SUF>",
    },
    debounce_ms = 150,
    accept_keymap = nil,
    dismiss_keymap = nil,
    tls_skip_verify_insecure = false,
    -- llm-ls configuration, cf llm-ls section
    lsp = {
        bin_path = vim.fn.stdpath("data") .. "/mason/bin/llm-ls",
        cmd_env = { LLM_LOG_LEVEL = "DEBUG" },
    },
    -- tokenizer = nil, -- cf Tokenizer paragraph
    tokenizer = {
        repository = "codellama/CodeLlama-13b-hf",
    },
    context_window = 4096,
    enable_suggestions_on_startup = false,
    enable_suggestions_on_files = { "*.py", "*.js", "*.ts", "*.astro", "*.vue", "*.lua" },
    disable_url_path_completion = false, -- cf Backend
})
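-- Note: lsp.bin_path above points at Mason's bin directory. Assumption (not part
-- of this commit): with mason.nvim installed, llm-ls can be installed there via
-- `:MasonInstall llm-ls`.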
vim.keymap.set("i", "<Tab>", function()
if llmCompletion.shown_suggestion ~= nil then
llmCompletion.complete()
else
local keys = vim.api.nvim_replace_termcodes("<Tab>", true, false, true)
vim.api.nvim_feedkeys(keys, "n", false)
end
end, { noremap = true, silent = true })
vim.keymap.set("n", "<Tab>", function()
if llmCompletion.shown_suggestion ~= nil then
llmCompletion.complete()
else
local keys = vim.api.nvim_replace_termcodes("<Tab>", true, false, true)
vim.api.nvim_feedkeys(keys, "n", false)
end
end, { noremap = true, silent = true })
-- Debounce helper: returns a wrapper that only invokes `func` once `timeout`
-- milliseconds have passed without another call.
local debounce = function(func, timeout)
    if type(func) ~= "function" then
        error("Expected a function")
    end
    if type(timeout) ~= "number" or timeout <= 0 then
        error("Expected a positive number for timeout")
    end
    local timer = vim.uv.new_timer() -- vim.uv requires Neovim >= 0.10 (vim.loop on older versions)
    return function(...)
        local args = { ... }
        timer:stop() -- Reset the timer
        timer:start(
            timeout,
            0,
            vim.schedule_wrap(function()
                func(unpack(args))
            end)
        )
    end
end
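-- A quick illustration (not part of this commit): rapid calls collapse into one,
-- firing roughly `timeout` ms after the last call.
--   local ping = debounce(function() print("suggest") end, 3000)
--   ping(); ping(); ping() -- prints "suggest" once, about 3s after the third call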
-- Define the function to be executed
local function on_text_change()
    llmCompletion.lsp_suggest()
end

-- Debounce the function
local debounced_on_text_change = debounce(on_text_change, 3000)

-- Set up the autocommand
vim.api.nvim_create_autocmd({ "InsertCharPre" }, {
    pattern = "*",
    callback = debounced_on_text_change,
    desc = "Run method after typing stops with debounce",
})