feat: remove chunking and switch to a multi-step process when the token count is too high
@@ -77,18 +77,25 @@ local function get_estimate_fn()
   end
 end
 
------------------------------------------------------------------------------
--- CHUNKING
------------------------------------------------------------------------------
-local function handle_chunking_if_needed(prompt, estimate_fn)
+---------------------------------------------------------------------------------
+-- Step-by-Step Handling (replaces chunking)
+---------------------------------------------------------------------------------
+local function handle_step_by_step_if_needed(prompt, estimate_fn)
   local conf = config.load()
   local token_count = estimate_fn(prompt)
-  if not conf.enable_chunking or token_count <= (conf.token_limit or 8000) then
+  -- If step-by-step is disabled or token count is within limit, return original prompt
+  if not conf.enable_step_by_step or token_count <= (conf.token_limit or 8000) then
     return { prompt }
   end
 
-  local chunks = ui.chunkify(prompt, estimate_fn, conf.token_limit or 8000)
-  return chunks
+  -- If we exceed the token limit, create a single message prompting the user to split tasks
+  local step_prompt = [[
+It appears this request might exceed the model's token limit if done all at once.
+Please break down the tasks into smaller steps and handle them one by one.
+At each step, we'll provide relevant files or context if needed.
+Thank you!
+]]
+  return { step_prompt }
 end
 
 -- Close an existing buffer by name (if it exists)
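Note: the two settings this function reads, enable_step_by_step and token_limit, come from config.load(). The sketch below is a guess at what that config module might look like, assuming config.load() returns a plain Lua table with user options merged over defaults; only the two field names are taken from the diff, everything else is hypothetical.

local M = {}

-- Hypothetical defaults; enable_step_by_step replaces the old enable_chunking flag
local defaults = {
  enable_step_by_step = true,
  token_limit = 8000, -- matches the fallback value used throughout the diff
}

local user_opts = {}

function M.setup(opts)
  user_opts = opts or {}
end

function M.load()
  -- "force" makes user-supplied values win over the defaults
  return vim.tbl_deep_extend("force", defaults, user_opts)
end

return M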
@@ -339,33 +346,16 @@ function M.run_chatgpt_command()
   store_prompt_for_reference(prompt)
 
   local estimate_fn = get_estimate_fn()
-  local chunks = handle_chunking_if_needed(prompt, estimate_fn)
+  local chunks = handle_step_by_step_if_needed(prompt, estimate_fn)
 
+  -- We either get the original prompt or a single special prompt
+  copy_to_clipboard(chunks[1])
   if #chunks == 1 then
-    copy_to_clipboard(chunks[1])
     vim.api.nvim_out_write("Prompt copied to clipboard! Paste into ChatGPT.\n")
   else
-    -- Multiple chunks. We'll create separate scratch buffers
-    for i, chunk in ipairs(chunks) do
-      close_existing_buffer_by_name("ChatGPT_Generated_Chunk_" .. i .. "$")
-
-      local cbuf = vim.api.nvim_create_buf(false, true)
-      vim.api.nvim_buf_set_name(cbuf, "ChatGPT_Generated_Chunk_" .. i)
-      vim.api.nvim_buf_set_option(cbuf, "filetype", "markdown")
-
-      local lines = {
-        "# Chunk " .. i .. " of " .. #chunks .. ":",
-        "# Copy/paste this chunk into ChatGPT, then come back and copy the next chunk as needed.",
-        ""
-      }
-      vim.list_extend(lines, vim.split(chunk, "\n"))
-      vim.api.nvim_buf_set_lines(cbuf, 0, -1, false, lines)
-
-      if i == 1 then
-        copy_to_clipboard(chunk)
-        vim.api.nvim_out_write("Copied chunk #1 of " .. #chunks .. " to clipboard!\n")
-      end
-    end
+    -- In this revised approach, we no longer generate multiple buffers.
+    -- We simply rely on the single step-by-step prompt in chunks[1].
+    vim.api.nvim_out_write("Step-by-step prompt copied to clipboard!\n")
   end
 
   vim.api.nvim_buf_set_option(bufnr, "modified", false)
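Note: copy_to_clipboard is called here but defined outside this diff. A minimal sketch of what such a helper might be, assuming the system clipboard ("+" register) is the target; the body is an assumption, only the name appears in the diff.

-- Hypothetical body: write text to the system clipboard register
local function copy_to_clipboard(text)
  vim.fn.setreg("+", text)
end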
@@ -498,28 +488,17 @@ function M.run_chatgpt_paste_command()
   local token_count = estimate_fn(prompt)
   ui.debug_log("Returning requested files. Token count: " .. token_count)
 
-  if token_count > (conf.token_limit or 8000) then
-    if conf.enable_chunking then
-      local chunks = ui.chunkify(prompt, estimate_fn, conf.token_limit or 8000)
-      for i, chunk in ipairs(chunks) do
-        local cbuf = vim.api.nvim_create_buf(false, true)
-        vim.api.nvim_buf_set_name(cbuf, "ChatGPT_Requested_Files_Chunk_" .. i)
-        local lines = {
-          "# Chunk " .. i .. " of " .. #chunks .. ":",
-          "# Copy/paste this chunk into ChatGPT, then come back and copy next chunk as needed.",
-          ""
-        }
-        vim.list_extend(lines, vim.split(chunk, "\n"))
-        vim.api.nvim_buf_set_lines(cbuf, 0, -1, false, lines)
-
-        if i == 1 then
-          copy_to_clipboard(chunk)
-          print("Copied chunk #1 of " .. #chunks .. " to clipboard!")
-        end
-      end
-    else
-      vim.api.nvim_err_writeln("Too many requested files. Exceeds token limit.")
-    end
+  -- We simply check if step-by-step is enabled, then provide a short note if needed
+  if token_count > (conf.token_limit or 8000) and conf.enable_step_by_step then
+    local step_message = [[
+It appears this requested data is quite large. Please consider handling these files step by step.
+Let me know how you would like to proceed.
+]]
+    copy_to_clipboard(step_message)
+    print("Step-by-step guidance copied to clipboard!")
+    return
+  elseif token_count > (conf.token_limit or 8000) then
+    vim.api.nvim_err_writeln("Requested files exceed token limit. No step-by-step support enabled.")
+    return
+  end
 
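Note: estimate_fn is produced by get_estimate_fn(), whose body falls outside this diff. One common heuristic an estimator like this might use, shown purely as an assumption, is roughly four characters per token:

-- Hypothetical estimator: ~4 characters per token is a common
-- rule of thumb for English prose and code
local function estimate_tokens(prompt)
  return math.ceil(#prompt / 4)
end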
@@ -531,4 +510,4 @@ function M.run_chatgpt_paste_command()
   end
 end
 
 return M