feat: add the possibility for debug commands

commit 3859f5531a
parent 381c5108d6
Date: 2025-01-04 16:52:02 +01:00
2 changed files with 135 additions and 47 deletions


@@ -7,15 +7,11 @@ directories:
 initial_files:
   - "README.md"
 debug: false
-# New config flags:
-preview_changes: true
-interactive_file_selection: true
-partial_acceptance: true
-improved_debug: true
-enable_chunking: false
-# We rely on token_limit to decide chunk sizes
-token_limit: 3000
-# Enable chunking if we exceed the token limit
-enable_chunking: true
+preview_changes: false
+interactive_file_selection: false
+partial_acceptance: false
+improved_debug: false
+enable_debug_commands: true
+token_limit: 128000
+enable_step_by_step: true
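A minimal sketch of how the Lua changes further down consume the new keys; the literal table stands in for whatever `config.load()` returns, and only that stand-in is invented here:

```lua
-- Sketch only: a literal table standing in for config.load(), annotated with
-- the code paths (in the Lua diff below) that each new key gates.
local conf = {
  enable_debug_commands = true,   -- adds the "### Debug Commands Info:" section to generated prompts
  enable_step_by_step   = true,   -- lets handle_step_by_step_if_needed() swap in a "split the task" prompt
  token_limit           = 128000, -- threshold for that swap; the code falls back to 8000 when unset
}
if conf.enable_debug_commands then
  print("debug commands enabled, token limit " .. (conf.token_limit or 8000))
end
```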


@@ -53,7 +53,6 @@ local function is_directory(path)
   return stat and stat.type == "directory"
 end
--- We'll create two token estimate functions: one basic, one improved
 local function estimate_tokens_basic(text)
   local approx_chars_per_token = 4
   local length = #text
@@ -61,7 +60,6 @@ local function estimate_tokens_basic(text)
 end
 local function estimate_tokens_improved(text)
-  -- Word-based approach, assume ~0.75 token/word
   local words = #vim.split(text, "%s+")
   local approximate_tokens = math.floor(words * 0.75)
   ui.debug_log("Using improved token estimate: " .. approximate_tokens .. " tokens")
@@ -77,29 +75,23 @@ local function get_estimate_fn()
   end
 end
---------------------------------------------------------------------------------
--- Step-by-Step Handling (replaces chunking)
---------------------------------------------------------------------------------
+-- Handle large prompts by splitting them if needed
 local function handle_step_by_step_if_needed(prompt, estimate_fn)
   local conf = config.load()
   local token_count = estimate_fn(prompt)
-  -- If step-by-step is disabled or token count is within limit, return the original prompt
   if not conf.enable_step_by_step or token_count <= (conf.token_limit or 8000) then
     return { prompt }
   end
-  -- If we exceed the token limit, create a single message prompting the user to split tasks
   local step_prompt = [[
 It appears this request might exceed the model's token limit if done all at once.
 Please break down the tasks into smaller steps and handle them one by one.
 At each step, we'll provide relevant files or context if needed.
 Thank you!
 ]]
   return { step_prompt }
 end
--- Close an existing buffer by name (if it exists)
 local function close_existing_buffer_by_name(pattern)
   for _, b in ipairs(vim.api.nvim_list_bufs()) do
     local name = vim.api.nvim_buf_get_name(b)
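The gate in `handle_step_by_step_if_needed` is simple enough to exercise in isolation. A standalone sketch with the plugin's config and estimator stubbed out (the stubs are the only invented parts):

```lua
-- Standalone sketch of the step-by-step gate: the conf table and the inline
-- estimator functions stand in for config.load() and get_estimate_fn().
local conf = { enable_step_by_step = true, token_limit = 128000 }

local function handle_step_by_step_if_needed(prompt, estimate_fn)
  local token_count = estimate_fn(prompt)
  if not conf.enable_step_by_step or token_count <= (conf.token_limit or 8000) then
    return { prompt } -- fits: pass the prompt through untouched
  end
  return { "Please break the tasks into smaller steps." } -- too big: ask to split
end

print(handle_step_by_step_if_needed("small", function() return 120 end)[1])    --> small
print(handle_step_by_step_if_needed("huge", function() return 200000 end)[1])  --> Please break the tasks into smaller steps.
```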
@@ -109,10 +101,8 @@ local function close_existing_buffer_by_name(pattern)
   end
 end
--- Show the user a preview buffer with the proposed changes (unchanged).
 local function preview_changes(changes)
   close_existing_buffer_by_name("ChatGPT_Changes_Preview$")
   local bufnr = vim.api.nvim_create_buf(false, true)
   vim.api.nvim_buf_set_name(bufnr, "ChatGPT_Changes_Preview")
   vim.api.nvim_buf_set_option(bufnr, "filetype", "diff")
@@ -139,10 +129,8 @@ local function preview_changes(changes)
vim.cmd("buffer " .. bufnr) vim.cmd("buffer " .. bufnr)
end end
-- Minimal partial acceptance from previous example
local function partial_accept(changes) local function partial_accept(changes)
close_existing_buffer_by_name("ChatGPT_Partial_Accept$") close_existing_buffer_by_name("ChatGPT_Partial_Accept$")
local bufnr = vim.api.nvim_create_buf(false, true) local bufnr = vim.api.nvim_create_buf(false, true)
vim.api.nvim_buf_set_name(bufnr, "ChatGPT_Partial_Accept") vim.api.nvim_buf_set_name(bufnr, "ChatGPT_Partial_Accept")
vim.api.nvim_buf_set_option(bufnr, "filetype", "diff") vim.api.nvim_buf_set_option(bufnr, "filetype", "diff")
@@ -242,10 +230,8 @@ local function partial_accept(changes)
   return final_changes
 end
--- Utility to store generated prompt in a scratch buffer
 local function store_prompt_for_reference(prompt)
   close_existing_buffer_by_name("ChatGPT_Generated_Prompt$")
   local bufnr = vim.api.nvim_create_buf(false, true)
   vim.api.nvim_buf_set_name(bufnr, "ChatGPT_Generated_Prompt")
   vim.api.nvim_buf_set_option(bufnr, "filetype", "markdown")
@@ -264,14 +250,85 @@ local function store_prompt_for_reference(prompt)
vim.cmd("buffer " .. bufnr) vim.cmd("buffer " .. bufnr)
end end
---------------------------------------------------------------------------- local function list_files_in_dir(dir)
-- run_chatgpt_command local handle = vim.loop.fs_scandir(dir)
---------------------------------------------------------------------------- local entries = {}
if handle then
while true do
local name, t = vim.loop.fs_scandir_next(handle)
if not name then
break
end
if t == "directory" then
table.insert(entries, dir .. "/" .. name .. "/")
else
table.insert(entries, dir .. "/" .. name)
end
end
end
return entries
end
local function grep_in_file(search_string, filepath)
local content = read_file(filepath)
if not content then
return "Could not read file: " .. filepath
end
local results = {}
local line_num = 0
for line in content:gmatch("([^\n]*)\n?") do
line_num = line_num + 1
if line:find(search_string, 1, true) then
table.insert(results, filepath .. ":" .. line_num .. ":" .. line)
end
end
if #results == 0 then
return "No matches in " .. filepath
else
return table.concat(results, "\n")
end
end
local function execute_debug_command(cmd)
if type(cmd) ~= "table" or not cmd.command then
return "Invalid command object."
end
local command = cmd.command
if command == "list" then
local dir = cmd.dir or "."
return "Listing files in: " .. dir .. "\n" .. table.concat(list_files_in_dir(dir), "\n")
elseif command == "grep" then
local pattern = cmd.pattern
local target = cmd.target
if not pattern or not target then
return "Usage for grep: {command='grep', pattern='<text>', target='<file_or_directory>'}"
end
local stat = vim.loop.fs_stat(target)
if not stat then
return "Cannot grep: target path does not exist"
end
if stat.type == "directory" then
local all_files = list_files_in_dir(target)
local results = {}
for _, f in ipairs(all_files) do
local fstat = vim.loop.fs_stat(f)
if fstat and fstat.type == "file" then
table.insert(results, grep_in_file(pattern, f))
end
end
return table.concat(results, "\n")
else
return grep_in_file(pattern, target)
end
else
return "Unknown command: " .. command
end
end
function M.run_chatgpt_command() function M.run_chatgpt_command()
local conf = config.load() local conf = config.load()
ui.debug_log("Running :ChatGPT command.") ui.debug_log("Running :ChatGPT command.")
-- Possibly let user select directories if interactive_file_selection is true
local dirs = conf.directories or {"."} local dirs = conf.directories or {"."}
if conf.interactive_file_selection then if conf.interactive_file_selection then
dirs = ui.pick_directories(dirs) dirs = ui.pick_directories(dirs)
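The new `grep_in_file` matches with `string.find(..., 1, true)`, so the search string is treated literally rather than as a Lua pattern. A plain-Lua check of that line-matching logic (only the inline sample text and label are invented):

```lua
-- Standalone check of the line matching used by grep_in_file: find() with
-- init = 1 and plain = true keeps "." and "-" in the search string literal.
local function grep_lines(search_string, content, label)
  local results, line_num = {}, 0
  for line in content:gmatch("([^\n]*)\n?") do
    line_num = line_num + 1
    if line:find(search_string, 1, true) then
      table.insert(results, label .. ":" .. line_num .. ":" .. line)
    end
  end
  return table.concat(results, "\n")
end

print(grep_lines("token_limit", "debug: false\ntoken_limit: 128000\n", "config.yaml"))
--> config.yaml:2:token_limit: 128000
```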
@@ -280,9 +337,7 @@ function M.run_chatgpt_command()
     end
   end
-  -- Close existing prompt buffer if open
   close_existing_buffer_by_name("ChatGPT_Prompt.md$")
   local bufnr = vim.api.nvim_create_buf(false, false)
   vim.api.nvim_buf_set_name(bufnr, "ChatGPT_Prompt.md")
   vim.api.nvim_buf_set_option(bufnr, "filetype", "markdown")
@@ -343,6 +398,25 @@ function M.run_chatgpt_command()
     table.concat(included_sections, "\n")
   }
+  if conf.enable_debug_commands then
+    table.insert(initial_sections, "\n### Debug Commands Info:\n")
+    table.insert(initial_sections, [[
+If you need debugging commands, include them in your YAML response as follows:
+```yaml
+commands:
+  - command: "list"
+    dir: "some/directory"
+  - command: "grep"
+    pattern: "searchString"
+    target: "path/to/file/or/directory"
+```
+When these commands are present and enable_debug_commands is true, I'll execute them and copy the results to the clipboard.
+]])
+  end
   local prompt = table.concat(initial_sections, "\n")
   store_prompt_for_reference(prompt)
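On the paste side (next hunks), the plugin expects the parsed reply to carry a `commands` list it can hand to `execute_debug_command`. A hypothetical decoded shape, reusing the placeholder values from the prompt text above, just to show the table the dispatch loop iterates over (the YAML-to-table decoding itself happens elsewhere in the plugin):

```lua
-- Hypothetical result of parsing a reply that contains a commands block.
local data = {
  commands = {
    { command = "list", dir = "some/directory" },
    { command = "grep", pattern = "searchString", target = "path/to/file/or/directory" },
  },
}
for _, cmd in ipairs(data.commands) do
  print(cmd.command) --> "list", then "grep"
end
```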
@@ -363,9 +437,6 @@ function M.run_chatgpt_command()
vim.cmd("buffer " .. bufnr) vim.cmd("buffer " .. bufnr)
end end
----------------------------------------------------------------------------
-- run_chatgpt_paste_command
----------------------------------------------------------------------------
function M.run_chatgpt_paste_command() function M.run_chatgpt_paste_command()
local conf = config.load() local conf = config.load()
ui.debug_log("Running :ChatGPTPaste command.") ui.debug_log("Running :ChatGPTPaste command.")
@@ -381,6 +452,17 @@ function M.run_chatgpt_paste_command()
     return
   end
+  if data.commands and conf.enable_debug_commands then
+    local results = {}
+    for _, cmd in ipairs(data.commands) do
+      table.insert(results, execute_debug_command(cmd))
+    end
+    local output = table.concat(results, "\n\n")
+    copy_to_clipboard(output)
+    print("Debug command results copied to clipboard!")
+    return
+  end
   if data.project_name and data.files then
     ui.debug_log("Received project_name and files in response.")
     if data.project_name ~= conf.project_name then
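When a reply carries several commands, their outputs are joined with a blank line before being copied. A toy illustration of the resulting clipboard text (the sample result strings are invented):

```lua
-- Toy illustration of how two command results end up on the clipboard,
-- joined by a blank line as in the dispatch loop above.
local results = {
  "Listing files in: some/directory\nsome/directory/README.md",
  "No matches in path/to/file/or/directory",
}
print(table.concat(results, "\n\n"))
```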
@@ -414,7 +496,7 @@ function M.run_chatgpt_paste_command()
         vim.api.nvim_err_writeln("Preview not closed in time. Aborting.")
         return
       end
-    end -- ← ADDED MISSING END HERE
+    end
     local final_files = data.files
     if conf.partial_acceptance then
@@ -452,7 +534,6 @@ function M.run_chatgpt_paste_command()
     end
   else
-    -- Intermediate request for more files
     local requested_paths = {}
     for _, fileinfo in ipairs(data.files) do
       if fileinfo.path then
@@ -482,7 +563,6 @@ function M.run_chatgpt_paste_command()
     local prompt = table.concat(sections, "\n")
     local estimate_fn = get_estimate_fn()
     local token_count = estimate_fn(prompt)
     ui.debug_log("Returning requested files. Token count: " .. token_count)
@@ -508,18 +588,11 @@ function M.run_chatgpt_paste_command()
   end
 end
-----------------------------------------------------------------------------
--- run_chatgpt_current_buffer_command
-----------------------------------------------------------------------------
 function M.run_chatgpt_current_buffer_command()
   local conf = config.load()
   ui.debug_log("Running :ChatGPTCurrentBuffer command.")
-  -- Get the content of the current buffer as user instructions
   local lines = vim.api.nvim_buf_get_lines(0, 0, -1, false)
   local user_input = table.concat(lines, "\n")
-  -- Possibly let user select directories if interactive_file_selection is true
   local dirs = conf.directories or {"."}
   if conf.interactive_file_selection then
     dirs = ui.pick_directories(dirs)
@@ -564,6 +637,25 @@ function M.run_chatgpt_current_buffer_command()
     table.concat(included_sections, "\n")
   }
+  if conf.enable_debug_commands then
+    table.insert(initial_sections, "\n### Debug Commands Info:\n")
+    table.insert(initial_sections, [[
+If you need debugging commands, include them in your YAML response as follows:
+```yaml
+commands:
+  - command: "list"
+    dir: "some/directory"
+  - command: "grep"
+    pattern: "searchString"
+    target: "path/to/file/or/directory"
+```
+When these commands are present and enable_debug_commands is true, I'll execute them and copy the results to the clipboard.
+]])
+  end
   local prompt = table.concat(initial_sections, "\n")
   store_prompt_for_reference(prompt)