feat: add a token limit and make it configurable

This commit is contained in:
2024-12-12 22:46:32 +01:00
parent 9a9868c991
commit 00a52998b8
2 changed files with 22 additions and 1 deletion

View File

@@ -3,6 +3,7 @@
-- 1) Determine the Git root based on the currently opened file. -- 1) Determine the Git root based on the currently opened file.
-- 2) If no file is open or not in Git repo, fallback to current working directory. -- 2) If no file is open or not in Git repo, fallback to current working directory.
-- 3) Add support for configuring a list of default prompt blocks ("go-development", "typo3-development", "basic-prompt") that can override the initial prompt if provided. -- 3) Add support for configuring a list of default prompt blocks ("go-development", "typo3-development", "basic-prompt") that can override the initial prompt if provided.
-- 4) Add support for configuring a token limit.
local M = {} local M = {}
local uv = vim.loop local uv = vim.loop
@@ -57,7 +58,8 @@ function M.load()
local config = { local config = {
initial_prompt = "", initial_prompt = "",
directories = { "." }, directories = { "." },
default_prompt_blocks = {} default_prompt_blocks = {},
token_limit = 128000
} }
if fd then if fd then
@@ -76,6 +78,9 @@ function M.load()
if type(result.default_prompt_blocks) == "table" then if type(result.default_prompt_blocks) == "table" then
config.default_prompt_blocks = result.default_prompt_blocks config.default_prompt_blocks = result.default_prompt_blocks
end end
if type(result.token_limit) == "number" then
config.token_limit = result.token_limit
end
end end
end end
else else

View File

@@ -24,6 +24,13 @@ local function parse_response(raw)
return data return data
end end
--- Roughly estimate the number of tokens in a string.
-- Uses the common heuristic of ~4 characters per token. Note that `#text`
-- is the byte length, not the UTF-8 character count, which is acceptable
-- for a coarse pre-flight limit check.
-- @tparam string text the prompt text to measure
-- @tparam[opt=4] number chars_per_token override for the chars-per-token heuristic
-- @treturn number estimated token count (floored, >= 0)
local function estimate_tokens(text, chars_per_token)
  chars_per_token = chars_per_token or 4
  return math.floor(#text / chars_per_token)
end
function M.run_chatgpt_command() function M.run_chatgpt_command()
local conf = config.load() local conf = config.load()
local user_input = vim.fn.input("Message for O1 Model: ") local user_input = vim.fn.input("Message for O1 Model: ")
@@ -47,6 +54,15 @@ function M.run_chatgpt_command()
table.insert(sections, file_sections) table.insert(sections, file_sections)
local prompt = table.concat(sections, "\n") local prompt = table.concat(sections, "\n")
local token_limit = conf.token_limit or 8000
local token_count = estimate_tokens(prompt)
if token_count > token_limit then
print("Too many files attached. The request exceeds the O1 model limit of " .. token_limit .. " tokens.")
return
end
copy_to_clipboard(prompt) copy_to_clipboard(prompt)
print("Prompt copied to clipboard! Paste it into the ChatGPT O1 model.") print("Prompt copied to clipboard! Paste it into the ChatGPT O1 model.")
end end