feat: replace token limit with prompt_char_limit

commit 2c855e881b
parent 452253cdd0
Date: 2025-01-04 20:18:44 +01:00
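
For context, a minimal sketch of how the renamed option might be set from a user's config. The `setup()` entry point and module name below are assumptions for illustration; only `prompt_char_limit` and `enable_step_by_step` actually appear in this commit.

```lua
-- Hypothetical configuration sketch; everything except prompt_char_limit
-- and enable_step_by_step is assumed, not taken from this commit.
require("chatgpt_prompt").setup({
  enable_step_by_step = true,
  prompt_char_limit = 8000, -- replaces the former token_limit (default 8000)
})
```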


@@ -53,37 +53,16 @@ local function is_directory(path)
   return stat and stat.type == "directory"
 end
-local function estimate_tokens_basic(text)
-  local approx_chars_per_token = 4
-  local length = #text
-  return math.floor(length / approx_chars_per_token)
-end
-local function estimate_tokens_improved(text)
-  local words = #vim.split(text, "%s+")
-  local approximate_tokens = math.floor(words * 0.75)
-  ui.debug_log("Using improved token estimate: " .. approximate_tokens .. " tokens")
-  return approximate_tokens
-end
-local function get_estimate_fn()
+-- We now simply count characters instead of estimating tokens.
+local function handle_step_by_step_if_needed(prompt)
   local conf = config.load()
-  if conf.improved_debug then
-    return estimate_tokens_improved
-  else
-    return estimate_tokens_basic
-  end
-end
-local function handle_step_by_step_if_needed(prompt, estimate_fn)
-  local conf = config.load()
-  local token_count = estimate_fn(prompt)
-  if not conf.enable_step_by_step or token_count <= (conf.token_limit or 8000) then
+  local length = #prompt
+  if not conf.enable_step_by_step or length <= (conf.prompt_char_limit or 8000) then
     return { prompt }
   end
   local step_prompt = [[
-It appears this request might exceed the model's token limit if done all at once.
+It appears this request might exceed the model's prompt character limit if done all at once.
 Please break down the tasks into smaller steps and handle them one by one.
 At each step, we'll provide relevant files or context if needed.
 Thank you!
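
Note for reviewers: judged by the heuristic this commit deletes (`estimate_tokens_basic` assumed 4 characters per token), the default cutoff effectively tightens. A rough sketch of the before/after thresholds:

```lua
-- Rough comparison using the 4-chars-per-token factor removed above.
local old_cutoff_in_chars = 8000 * 4 -- token_limit of 8000 tokens ≈ 32000 chars
local new_cutoff_in_chars = 8000     -- prompt_char_limit default
-- Prompts between 8,000 and 32,000 characters now take the step-by-step
-- path where they previously did not.
print(old_cutoff_in_chars, new_cutoff_in_chars) --> 32000  8000
```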
@@ -415,9 +394,7 @@ function M.run_chatgpt_command()
   local prompt = table.concat(initial_sections, "\n")
   store_prompt_for_reference(prompt)
-  local estimate_fn = get_estimate_fn()
-  local chunks = handle_step_by_step_if_needed(prompt, estimate_fn)
+  local chunks = handle_step_by_step_if_needed(prompt)
   copy_to_clipboard(chunks[1])
   if #chunks == 1 then
     vim.api.nvim_out_write("Prompt copied to clipboard! Paste into ChatGPT.\n")
@@ -557,11 +534,10 @@ function M.run_chatgpt_paste_command()
   }
   local prompt = table.concat(sections, "\n")
-  local estimate_fn = get_estimate_fn()
-  local token_count = estimate_fn(prompt)
-  ui.debug_log("Returning requested files. Token count: " .. token_count)
+  local length = #prompt
+  ui.debug_log("Returning requested files. Character count: " .. length)
-  if token_count > (conf.token_limit or 8000) and conf.enable_step_by_step then
+  if length > (conf.prompt_char_limit or 8000) and conf.enable_step_by_step then
     local step_message = [[
 It appears this requested data is quite large. Please split the task into smaller steps
 and continue step by step.
@@ -570,8 +546,8 @@ function M.run_chatgpt_paste_command()
     copy_to_clipboard(step_message)
     print("Step-by-step guidance copied to clipboard!")
     return
-  elseif token_count > (conf.token_limit or 8000) then
-    vim.api.nvim_err_writeln("Requested files exceed token limit. No step-by-step support enabled.")
+  elseif length > (conf.prompt_char_limit or 8000) then
+    vim.api.nvim_err_writeln("Requested files exceed prompt character limit. No step-by-step support enabled.")
     return
   end
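
One detail of the new check: Lua's `#` operator measures bytes, not characters, so multibyte UTF-8 prompts reach `prompt_char_limit` sooner than their visible length suggests. A quick illustration:

```lua
-- Lua's # returns byte length, so the "character limit" is really a byte limit.
print(#"hello") --> 5
print(#"héllo") --> 6 ("é" encodes as two bytes in UTF-8)
```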
@@ -600,6 +576,26 @@ function M.run_chatgpt_current_buffer_command()
   local initial_files = conf.initial_files or {}
   local included_sections = {}
+  local function is_directory(path)
+    local stat = vim.loop.fs_stat(path)
+    return stat and stat.type == "directory"
+  end
+  local function read_file(path)
+    local fd = vim.loop.fs_open(path, "r", 438)
+    if not fd then
+      return nil
+    end
+    local stat = vim.loop.fs_fstat(fd)
+    if not stat then
+      vim.loop.fs_close(fd)
+      return nil
+    end
+    local data = vim.loop.fs_read(fd, stat.size, 0)
+    vim.loop.fs_close(fd)
+    return data
+  end
   for _, item in ipairs(initial_files) do
     local root = vim.fn.getcwd()
     local full_path = root .. "/" .. item
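
A usage sketch for the `read_file` helper added in this hunk; the path and target table below are illustrative, not taken from the plugin:

```lua
-- Illustrative call only; "README.md" is an arbitrary example path.
local content = read_file(vim.fn.getcwd() .. "/README.md")
if content then
  table.insert(included_sections, content) -- included_sections as declared above
end
```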
@@ -653,11 +649,30 @@ function M.run_chatgpt_current_buffer_command()
   end
   local prompt = table.concat(initial_sections, "\n")
+  local function store_prompt_for_reference(pr)
+    close_existing_buffer_by_name("ChatGPT_Generated_Prompt$")
+    local bufnr_ref = vim.api.nvim_create_buf(false, true)
+    vim.api.nvim_buf_set_name(bufnr_ref, "ChatGPT_Generated_Prompt")
+    vim.api.nvim_buf_set_option(bufnr_ref, "filetype", "markdown")
+    local lines_ref = {
+      "# Below is the generated prompt. You can keep it for reference:",
+      ""
+    }
+    local pr_lines = vim.split(pr, "\n")
+    for _, line in ipairs(pr_lines) do
+      table.insert(lines_ref, line)
+    end
+    vim.api.nvim_buf_set_lines(bufnr_ref, 0, -1, false, lines_ref)
+    vim.cmd("vsplit")
+    vim.cmd("buffer " .. bufnr_ref)
+  end
   store_prompt_for_reference(prompt)
-  local estimate_fn = get_estimate_fn()
-  local chunks = handle_step_by_step_if_needed(prompt, estimate_fn)
+  local chunks = handle_step_by_step_if_needed(prompt)
   copy_to_clipboard(chunks[1])
   if #chunks == 1 then
     vim.api.nvim_out_write("Prompt (from current buffer) copied to clipboard! Paste into ChatGPT.\n")