feat: change to token calculation again

2025-02-13 11:42:12 +01:00
parent 216a4f2603
commit 07eceb4bee
3 changed files with 19 additions and 12 deletions

View File

@@ -8,7 +8,7 @@ ignore_files:
   - "*.log"
   - "vendor/"
-include_file_contents: false
+include_file_contents: true
 debug: false
 improved_debug: false
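
The loader below also picks up an optional `max_token` key from this same YAML file. A minimal user config exercising the new flag and the token limit might look like this (the `max_token: 4096` value is illustrative, not part of this commit):

ignore_files:
  - "*.log"
  - "vendor/"
include_file_contents: true
max_token: 4096  # overrides the 2048 fallback applied in M.load()
debug: false
improved_debug: false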

View File

@@ -87,6 +87,7 @@ function M.load()
       "No config file found (tried .chatgpt_config.yaml, chatgpt_config.yaml). Using defaults.",
       vim.log.levels.WARN
     )
+    config.max_token = 2048
     return config
   end
@@ -120,7 +121,7 @@ function M.load()
         if type(result.initial_files) == "table" then
           config.initial_files = result.initial_files
         end
-        if type(result.include_file_contents) == "boolean" then -- LOAD NEW FLAG
+        if type(result.include_file_contents) == "boolean" then
          config.include_file_contents = result.include_file_contents
         end
         if type(result.preview_changes) == "boolean" then
@@ -153,10 +154,17 @@ function M.load()
             end
           end
         end
+        if type(result.max_token) == "number" then
+          config.max_token = result.max_token
+        else
+          config.max_token = 2048
+        end
       end
     end
   else
     config.initial_prompt = "You are a coding assistant who receives a project's context and user instructions..."
+    config.max_token = 2048
   end
   -- Merge default prompt blocks
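
Taken together, the three hunks above guarantee that `config.max_token` is always populated: a numeric value from the YAML file wins, and every other path (no config file, non-numeric value, defaults branch) falls back to 2048. A standalone sketch of that precedence rule, with a hypothetical `resolve_max_token` helper standing in for the logic the commit inlines:

-- Sketch of the resolution order M.load() now applies.
-- `resolve_max_token` is a hypothetical helper; the commit inlines this logic.
local DEFAULT_MAX_TOKEN = 2048

local function resolve_max_token(result)
  -- `result` is the parsed YAML table, or nil when no config file was found.
  if result and type(result.max_token) == "number" then
    return result.max_token
  end
  return DEFAULT_MAX_TOKEN
end

assert(resolve_max_token(nil) == 2048)                     -- no config file
assert(resolve_max_token({ max_token = 4096 }) == 4096)    -- numeric override
assert(resolve_max_token({ max_token = "4096" }) == 2048)  -- non-numbers fall back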

View File

@@ -173,15 +173,20 @@ local function build_prompt(user_input, dirs, conf)
   table.insert(final_sections, table.concat(env_lines, "\n"))
   local final_prompt = table.concat(final_sections, "\n\n")
-  -- Replace placeholder "%PROJECT_NAME%" with the actual project name from configuration
   final_prompt = final_prompt:gsub("%%PROJECT_NAME%%", conf.project_name)
   return final_prompt
 end
 
+-- New token estimation function.
+local function estimate_token_count(text)
+  -- Use a simple heuristic: assume an average of 4 characters per token.
+  return math.floor(#text / 4)
+end
+
 local function handle_step_by_step_if_needed(prompt, conf)
-  local length = #prompt
-  local limit = conf.prompt_char_limit or 8000
-  if (not conf.enable_step_by_step) or (length <= limit) then
+  local token_count = estimate_token_count(prompt)
+  local limit = conf.max_token or 2048
+  if (not conf.enable_step_by_step) or (token_count <= limit) then
     return { prompt }
   end
   return { prompts["step-prompt"] }
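
The heuristic keeps the cut-over point close to the old behavior: the previous default limit of 8000 characters estimates to 8000 / 4 = 2000 tokens, just under the new 2048-token default. A self-contained check of the gating logic (standalone sketch; `prompts` is stubbed with a placeholder step prompt):

-- Same heuristic as the commit: roughly 4 characters per token.
local function estimate_token_count(text)
  return math.floor(#text / 4)
end

-- Stubbed stand-in for the plugin's real prompt table (illustrative only).
local prompts = { ["step-prompt"] = "Let's proceed step by step..." }

local function handle_step_by_step_if_needed(prompt, conf)
  local token_count = estimate_token_count(prompt)
  local limit = conf.max_token or 2048
  if (not conf.enable_step_by_step) or (token_count <= limit) then
    return { prompt }
  end
  return { prompts["step-prompt"] }
end

local conf = { enable_step_by_step = true, max_token = 2048 }
local short = string.rep("x", 8000)   -- ~2000 tokens: passes through unchanged
local long  = string.rep("x", 12000)  -- ~3000 tokens: replaced by the step prompt
assert(handle_step_by_step_if_needed(short, conf)[1] == short)
assert(handle_step_by_step_if_needed(long, conf)[1] == prompts["step-prompt"])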
@@ -208,7 +213,6 @@ local function run_chatgpt_command()
   local bufnr = vim.api.nvim_create_buf(false, false)
   vim.api.nvim_buf_set_name(bufnr, "ChatGPT_Prompt.md")
   vim.api.nvim_buf_set_option(bufnr, "filetype", "markdown")
-  -- Set omnifunc for file name auto-completion
   vim.api.nvim_buf_set_option(bufnr, "omnifunc", "v:lua.chatgpt_file_complete")
   vim.api.nvim_buf_set_option(bufnr, "bufhidden", "wipe")
   vim.api.nvim_buf_set_option(bufnr, "buftype", "")
@@ -280,9 +284,7 @@ local function run_chatgpt_paste_command()
     return
   end
-  -- Check if we have tools
   if data.tools then
-    -- Must also verify project name
     if not data.project_name or data.project_name ~= conf.project_name then
       vim.api.nvim_err_writeln("Project name mismatch or missing. Aborting tool usage.")
       return
@@ -294,7 +296,6 @@ local function run_chatgpt_paste_command()
     return
   end
-  -- If we see project_name & files => older YAML style. We handle it but it's discouraged now.
   if data.project_name and data.files then
     if data.project_name ~= conf.project_name then
       vim.api.nvim_err_writeln("Project name mismatch. Aborting.")
@@ -361,7 +362,6 @@ local function run_chatgpt_paste_command()
       end
     end
   else
-    -- Not final => user is requesting more files
     local requested_paths = {}
     local root = vim.fn.getcwd()
     for _, fileinfo in ipairs(data.files) do
@@ -459,7 +459,6 @@ M.run_chatgpt_command = run_chatgpt_command
 M.run_chatgpt_paste_command = run_chatgpt_paste_command
 M.run_chatgpt_current_buffer_command = run_chatgpt_current_buffer_command
--- New: Global function for file name auto-completion in ChatGPT prompt
 function _G.chatgpt_file_complete(findstart, base)
   if findstart == 1 then
     local line = vim.fn.getline('.')
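
For context, `chatgpt_file_complete` follows Vim's two-phase omnifunc protocol: it is called first with `findstart == 1` to report the column where completion starts, then with `findstart == 0` and the partial word in `base` to return candidates. A minimal sketch of that shape; the candidate source (`getcompletion` over files) is an assumption, not necessarily what the plugin's actual body does:

-- Hypothetical completion body following the omnifunc protocol.
function _G.chatgpt_file_complete(findstart, base)
  if findstart == 1 then
    -- Phase 1: scan left from the cursor to find where the file name starts.
    local line = vim.fn.getline('.')
    local start = vim.fn.col('.') - 1
    while start > 0 and line:sub(start, start):match("[%w%./_%-]") do
      start = start - 1
    end
    return start
  end
  -- Phase 2: return file-path candidates matching the typed prefix.
  return vim.fn.getcompletion(base, "file")
end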