replace gen.nvim with avante

This commit is contained in: parent 9452693037, commit ffc2d4c13d.
2 changed files with 76 additions and 48 deletions.

New file: lua/plugins/avante.lua (76 lines)

@@ -0,0 +1,76 @@
return {
|
||||
"yetone/avante.nvim",
|
||||
event = "VeryLazy",
|
||||
lazy = false,
|
||||
version = false, -- set this if you want to always pull the latest change
|
||||
opts = {
|
||||
provider = "openai",
|
||||
openai = {
|
||||
api_key_name = "cmd:cat ~/.openai",
|
||||
},
|
||||
provider = "ollama",
|
||||
vendors = {
|
||||
---@type AvanteProvider
|
||||
ollama = {
|
||||
["local"] = true,
|
||||
endpoint = "127.0.0.1:11434/v1",
|
||||
model = "codegemma",
|
||||
parse_curl_args = function(opts, code_opts)
|
||||
return {
|
||||
url = opts.endpoint .. "/chat/completions",
|
||||
headers = {
|
||||
["Accept"] = "application/json",
|
||||
["Content-Type"] = "application/json",
|
||||
},
|
||||
body = {
|
||||
model = opts.model,
|
||||
messages = require("avante.providers").copilot.parse_message(code_opts), -- you can make your own message, but this is very advanced
|
||||
max_tokens = 2048,
|
||||
stream = true,
|
||||
},
|
||||
}
|
||||
end,
|
||||
parse_response_data = function(data_stream, event_state, opts)
|
||||
require("avante.providers").openai.parse_response(data_stream, event_state, opts)
|
||||
end,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
-- if you want to build from source then do `make BUILD_FROM_SOURCE=true`
|
||||
build = "make",
|
||||
-- build = "powershell -ExecutionPolicy Bypass -File Build.ps1 -BuildFromSource false" -- for windows
|
||||
dependencies = {
|
||||
"nvim-treesitter/nvim-treesitter",
|
||||
"stevearc/dressing.nvim",
|
||||
"nvim-lua/plenary.nvim",
|
||||
"MunifTanjim/nui.nvim",
|
||||
--- The below dependencies are optional,
|
||||
"nvim-tree/nvim-web-devicons", -- or echasnovski/mini.icons
|
||||
{
|
||||
-- support for image pasting
|
||||
"HakonHarnes/img-clip.nvim",
|
||||
event = "VeryLazy",
|
||||
opts = {
|
||||
-- recommended settings
|
||||
default = {
|
||||
embed_image_as_base64 = false,
|
||||
prompt_for_file_name = false,
|
||||
drag_and_drop = {
|
||||
insert_mode = true,
|
||||
},
|
||||
-- required for Windows users
|
||||
use_absolute_path = true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
-- Make sure to set this up properly if you have lazy=true
|
||||
"MeanderingProgrammer/render-markdown.nvim",
|
||||
opts = {
|
||||
file_types = { "markdown", "Avante" },
|
||||
},
|
||||
ft = { "markdown", "Avante" },
|
||||
},
|
||||
},
|
||||
}
Deleted file (48 lines):

@@ -1,48 +0,0 @@
return {
|
||||
"David-Kunz/gen.nvim",
|
||||
lazy = true,
|
||||
keys = {
|
||||
{ "<leader>ai", ":Gen<CR>", mode = { "n", "v" }, desc = "AI tools using Ollama" },
|
||||
{ "<leader>aa", ":Gen Ask<CR>", mode = { "n", "v" }, desc = "[A]I [A]sk" },
|
||||
{
|
||||
"<leader>am",
|
||||
function()
|
||||
require("gen").select_model()
|
||||
end,
|
||||
mode = { "n", "v" },
|
||||
desc = "Select [A]I [m]odel",
|
||||
},
|
||||
},
|
||||
config = function()
|
||||
require("gen").setup({
|
||||
{
|
||||
model = "codellama",
|
||||
host = "localhost",
|
||||
port = "11434",
|
||||
quit_map = "q",
|
||||
retry_map = "<c-r>",
|
||||
init = function(options)
|
||||
pcall(io.popen, "ollama serve > /dev/null 2>&1 &")
|
||||
end,
|
||||
command = function(options)
|
||||
local body = { model = options.model, stream = true }
|
||||
return "curl --silent --no-buffer -X POST http://"
|
||||
.. options.host
|
||||
.. ":"
|
||||
.. options.port
|
||||
.. "/api/chat -d $body"
|
||||
end,
|
||||
display_mode = "split", -- "split" or "float"
|
||||
show_prompt = true,
|
||||
show_model = true,
|
||||
no_auto_close = false,
|
||||
debug = false,
|
||||
},
|
||||
})
|
||||
require("gen").prompts["Fix_Code"] = {
|
||||
prompt = "Fix the following code. Only ouput the result in format ```$filetype\n...\n```:\n```$filetype\n$text\n```",
|
||||
replace = true,
|
||||
extract = "```$filetype\n(.-)```",
|
||||
}
|
||||
end,
|
||||
}
(End of diff — web-UI footer removed.)