feat: add the :ChatGPTCurrentBuffer command

2025-01-02 13:44:42 +01:00
parent 94a0bf059b
commit 00e48586ad
2 changed files with 76 additions and 9 deletions

View File

@@ -83,7 +83,7 @@ end
 local function handle_step_by_step_if_needed(prompt, estimate_fn)
   local conf = config.load()
   local token_count = estimate_fn(prompt)
-  -- If step-by-step is disabled or token count is within limit, return original prompt
+  -- If step-by-step is disabled or token count is within limit, return the original prompt
   if not conf.enable_step_by_step or token_count <= (conf.token_limit or 8000) then
     return { prompt }
   end
@@ -91,10 +91,11 @@ local function handle_step_by_step_if_needed(prompt, estimate_fn)
   -- If we exceed the token limit, create a single message prompting the user to split tasks
   local step_prompt = [[
 It appears this request might exceed the model's token limit if done all at once.
 Please break down the tasks into smaller steps and handle them one by one.
 At each step, we'll provide relevant files or context if needed.
 Thank you!
 ]]
   return { step_prompt }
 end
@@ -220,7 +221,7 @@ local function partial_accept(changes)
     once = true,
     callback = function()
       on_write()
-      vim.cmd("bd! " .. bufnr) -- auto-close buffer
+      vim.cmd("bd! " .. bufnr)
     end
   })
@@ -348,13 +349,10 @@ function M.run_chatgpt_command()
   local estimate_fn = get_estimate_fn()
   local chunks = handle_step_by_step_if_needed(prompt, estimate_fn)
-  -- We either get the original prompt or a single special prompt
   copy_to_clipboard(chunks[1])
   if #chunks == 1 then
     vim.api.nvim_out_write("Prompt copied to clipboard! Paste into ChatGPT.\n")
   else
-    -- In this revised approach, we no longer generate multiple buffers.
-    -- We simply rely on the single step-by-step prompt in chunks[1].
     vim.api.nvim_out_write("Step-by-step prompt copied to clipboard!\n")
   end
@@ -416,7 +414,6 @@ function M.run_chatgpt_paste_command()
vim.api.nvim_err_writeln("Preview not closed in time. Aborting.") vim.api.nvim_err_writeln("Preview not closed in time. Aborting.")
return return
end end
end
local final_files = data.files local final_files = data.files
if conf.partial_acceptance then if conf.partial_acceptance then
@@ -488,10 +485,9 @@ function M.run_chatgpt_paste_command()
   local token_count = estimate_fn(prompt)
   ui.debug_log("Returning requested files. Token count: " .. token_count)
-  -- We simply check if step-by-step is enabled, then provide a short note if needed
   if token_count > (conf.token_limit or 8000) and conf.enable_step_by_step then
     local step_message = [[
-It appears this requested data is quite large. Please lets split the task into smaller steps
+It appears this requested data is quite large. Please split the task into smaller steps
 and continue step by step.
 Which files would you need for the first step?
 ]]
@@ -511,4 +507,74 @@ function M.run_chatgpt_paste_command()
 end
 end

+----------------------------------------------------------------------------
+-- run_chatgpt_current_buffer_command
+----------------------------------------------------------------------------
+function M.run_chatgpt_current_buffer_command()
+  local conf = config.load()
+  ui.debug_log("Running :ChatGPTCurrentBuffer command.")
+
+  -- Get the content of the current buffer as user instructions
+  local lines = vim.api.nvim_buf_get_lines(0, 0, -1, false)
+  local user_input = table.concat(lines, "\n")
+
+  -- Possibly let user select directories if interactive_file_selection is true
+  local dirs = conf.directories or { "." }
+  if conf.interactive_file_selection then
+    dirs = ui.pick_directories(dirs)
+    if #dirs == 0 then
+      dirs = conf.directories
+    end
+  end
+
+  local project_structure = context.get_project_structure(dirs)
+  local initial_files = conf.initial_files or {}
+  local included_sections = {}
+  for _, item in ipairs(initial_files) do
+    local root = vim.fn.getcwd()
+    local full_path = root .. "/" .. item
+    if is_directory(full_path) then
+      local dir_files = context.get_project_files({ item })
+      for _, f in ipairs(dir_files) do
+        local path = root .. "/" .. f
+        local data = read_file(path)
+        if data then
+          table.insert(included_sections, "\nFile: `" .. f .. "`\n```\n" .. data .. "\n```\n")
+        end
+      end
+    else
+      local data = read_file(full_path)
+      if data then
+        table.insert(included_sections, "\nFile: `" .. item .. "`\n```\n" .. data .. "\n```\n")
+      end
+    end
+  end
+
+  local initial_sections = {
+    "### Basic Prompt Instructions:\n",
+    conf.initial_prompt .. "\n\n\n",
+    "### User Instructions:\n",
+    user_input .. "\n\n\n",
+    "### Context/Data:\n",
+    "Project name: " .. (conf.project_name or "") .. "\n",
+    "Project Structure:\n",
+    project_structure,
+    table.concat(included_sections, "\n")
+  }
+  local prompt = table.concat(initial_sections, "\n")
+
+  store_prompt_for_reference(prompt)
+
+  local estimate_fn = get_estimate_fn()
+  local chunks = handle_step_by_step_if_needed(prompt, estimate_fn)
+  copy_to_clipboard(chunks[1])
+  if #chunks == 1 then
+    vim.api.nvim_out_write("Prompt (from current buffer) copied to clipboard! Paste into ChatGPT.\n")
+  else
+    vim.api.nvim_out_write("Step-by-step prompt (from current buffer) copied to clipboard!\n")
+  end
+end

 return M
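
The new command reuses helpers defined earlier in this module that do not appear in the hunks above (get_estimate_fn, copy_to_clipboard, store_prompt_for_reference, read_file, is_directory). A minimal sketch of the two most central ones, assuming a rough characters-per-token heuristic and the system clipboard register — not necessarily what the plugin actually implements:

-- Hypothetical sketches; the real helpers live earlier in this module and may differ.
local function get_estimate_fn()
  -- Crude token estimate: roughly four characters per token for English text.
  return function(prompt)
    return math.ceil(#prompt / 4)
  end
end

local function copy_to_clipboard(text)
  -- Write the assembled prompt to the system clipboard register.
  vim.fn.setreg("+", text)
end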

View File

@@ -1,2 +1,3 @@
 command! ChatGPT lua require('chatgpt_nvim').run_chatgpt_command()
 command! ChatGPTPaste lua require('chatgpt_nvim').run_chatgpt_paste_command()
+command! ChatGPTCurrentBuffer lua require('chatgpt_nvim').run_chatgpt_current_buffer_command()
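
With the command registered, a typical workflow is to write instructions into a scratch buffer and run :ChatGPTCurrentBuffer from it; the assembled prompt lands on the clipboard. A hypothetical convenience mapping — the <leader>cb key choice is an assumption, only the :ChatGPT* commands come from the plugin:

-- Hypothetical mapping; adjust the key to taste.
vim.keymap.set("n", "<leader>cb", "<cmd>ChatGPTCurrentBuffer<CR>",
  { desc = "Build a ChatGPT prompt from the current buffer" })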