feat: remove chunking and switch to a multi-step process when the token count is too high

2024-12-30 00:42:51 +01:00
parent 761ef836cd
commit 9251043aec
3 changed files with 40 additions and 79 deletions
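
The new step-by-step implementation itself is not part of this excerpt. As a rough illustration of the idea, a minimal Lua sketch follows, assuming hypothetical estimate_tokens and send_request helpers (neither is the plugin's actual API): issue sequential requests instead of splitting the input into chunks up front.

-- A minimal sketch (not this commit's actual implementation) of the
-- step-by-step idea. `estimate_tokens` and `send_request` are
-- hypothetical helpers assumed here for illustration.
local function process_step_by_step(parts, token_limit, on_done)
  local results = {}
  local i = 0
  local function step()
    i = i + 1
    if i > #parts then
      return on_done(results)
    end
    local text = parts[i]
    if estimate_tokens(text) > token_limit then
      -- A single part is still too large: ask for a summary first and
      -- carry the shorter summary forward instead.
      send_request("Summarize:\n" .. text, function(reply)
        table.insert(results, reply)
        step()
      end)
    else
      send_request(text, function(reply)
        table.insert(results, reply)
        step()
      end)
    end
  end
  step()
end

Because each request completes before the next begins, oversized inputs can be reduced (here by summarizing) before they are carried forward.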

View File

@@ -19,8 +19,7 @@ end
 function M.debug_log(msg)
   if conf.improved_debug and debug_bufnr then
-    local lines = vim.split(msg, "\n")
-    vim.api.nvim_buf_set_lines(debug_bufnr, -1, -1, false, lines)
+    vim.api.nvim_buf_set_lines(debug_bufnr, -1, -1, false, { msg })
   else
     if conf.debug then
       vim.api.nvim_out_write("[chatgpt_nvim:debug] " .. msg .. "\n")
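
Note that the replacement passes the raw message straight through: nvim_buf_set_lines requires each replacement string to be a single line, so this relies on callers logging one-line messages, whereas the removed vim.split handled embedded newlines. A minimal illustration of that constraint:

-- Appending to a buffer: each string must be a single line.
vim.api.nvim_buf_set_lines(debug_bufnr, -1, -1, false, { "one line" })  -- ok
-- vim.api.nvim_buf_set_lines(debug_bufnr, -1, -1, false, { "a\nb" })   -- errors: string cannot contain newlines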
@@ -89,28 +88,8 @@ function M.pick_directories(dirs)
   return selected_dirs
 end
-function M.chunkify(text, estimate_tokens_fn, token_limit)
-  local lines = vim.split(text, "\n")
-  local chunks = {}
-  local current_chunk = {}
-  local current_text = ""
+--------------------------------------------------------------------------------
+-- The old chunkify method has been removed; we now rely on a step-by-step process.
+--------------------------------------------------------------------------------
-  for _, line in ipairs(lines) do
-    local test_text = (current_text == "") and line or (current_text .. "\n" .. line)
-    local est_tokens = estimate_tokens_fn(test_text)
-    if est_tokens > token_limit then
-      table.insert(chunks, current_text)
-      current_text = line
-    else
-      current_text = test_text
-    end
-  end
-  if current_text ~= "" then
-    table.insert(chunks, current_text)
-  end
-  return chunks
-end
 return M
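
For reference, the removed M.chunkify accumulated lines until the estimated token count crossed the limit. A minimal usage sketch, with a rough ~4-characters-per-token estimator assumed for illustration (not the plugin's actual estimator):

-- Hypothetical usage of the removed M.chunkify. The ~4 chars/token
-- estimate below is an assumption for illustration only.
local function estimate_tokens(text)
  return math.ceil(#text / 4)
end

local text = table.concat(vim.fn.readfile("init.lua"), "\n")
local chunks = M.chunkify(text, estimate_tokens, 4000)
for i, chunk in ipairs(chunks) do
  print(("chunk %d: ~%d tokens"):format(i, estimate_tokens(chunk)))
end

One edge case visible in the removed code: when a single line already exceeded token_limit on the first iteration, an empty string was inserted as the first chunk.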