feat: add a token limit and make it configurable

This commit is contained in:
2024-12-12 22:46:32 +01:00
parent 9a9868c991
commit 00a52998b8
2 changed files with 22 additions and 1 deletion

View File

@@ -24,6 +24,13 @@ local function parse_response(raw)
return data
end
--- Estimate the number of tokens a piece of text will consume.
-- Uses the common rough heuristic of ~4 characters per token for
-- English text; the ratio is overridable for other tokenizers.
-- @tparam string|nil text text to estimate (nil is treated as empty)
-- @tparam[opt=4] number chars_per_token characters-per-token ratio
-- @treturn number estimated token count (floored, never negative)
local function estimate_tokens(text, chars_per_token)
chars_per_token = chars_per_token or 4
-- #text is the byte length, which is close enough for an estimate
local length = #(text or "")
return math.floor(length / chars_per_token)
end
--- Entry point: prompt the user for a message and build a prompt for
-- the O1 model. NOTE(review): the middle of this function is not
-- visible in this diff view; only the head and tail are shown.
function M.run_chatgpt_command()
-- Load plugin configuration (provides token_limit, among others)
local conf = config.load()
-- Synchronously read the user's message from the Neovim command line
local user_input = vim.fn.input("Message for O1 Model: ")
@@ -47,6 +54,15 @@ function M.run_chatgpt_command()
-- (continuation of M.run_chatgpt_command; the lines between the two
-- diff hunks are not visible here)
table.insert(sections, file_sections)
-- Join all accumulated sections into the final prompt text
local prompt = table.concat(sections, "\n")
-- Token budget is user-configurable via conf.token_limit; defaults to
-- 8000 when unset (beware: a conf.token_limit of false also falls
-- through to the default, which is acceptable for a numeric option)
local token_limit = conf.token_limit or 8000
local token_count = estimate_tokens(prompt)
-- Refuse to proceed when the estimated prompt size exceeds the limit
if token_count > token_limit then
print("Too many files attached. The request exceeds the O1 model limit of " .. token_limit .. " tokens.")
return
end
-- Within budget: hand the prompt to the clipboard for manual pasting
copy_to_clipboard(prompt)
print("Prompt copied to clipboard! Paste it into the ChatGPT O1 model.")
end