feat: change to create a buffer for the prompt

2024-12-23 15:34:36 +01:00
parent d6dc98cd58
commit eb19c4144f

@@ -62,74 +62,104 @@ local function is_directory(path)
   return stat and stat.type == "directory"
 end
 
+-- Replaces the inline input() call with opening a new buffer for prompt input
 function M.run_chatgpt_command()
   local conf = config.load()
   if conf.debug then
     vim.api.nvim_out_write("[chatgpt_nvim:init] Running :ChatGPT command.\n")
   end
 
-  local user_input = vim.fn.input("Message for O1 Model: ")
-  if user_input == "" then
-    print("No input provided.")
-    return
-  end
-
-  local dirs = conf.directories or {"."}
-  local project_structure = context.get_project_structure(dirs)
-
-  local initial_files = conf.initial_files or {}
-  local included_sections = {}
-
-  if #initial_files > 0 then
-    table.insert(included_sections, "\n\nIncluded files and directories (pre-selected):\n")
-    local root = vim.fn.getcwd()
-    for _, item in ipairs(initial_files) do
-      local full_path = root .. "/" .. item
-      if is_directory(full_path) then
-        local dir_files = context.get_project_files({item})
-        for _, f in ipairs(dir_files) do
-          local path = root .. "/" .. f
-          local data = read_file(path)
-          if data then
-            table.insert(included_sections, "\nFile: `" .. f .. "`\n```\n" .. data .. "\n```\n")
-          end
-        end
-      else
-        local data = read_file(full_path)
-        if data then
-          table.insert(included_sections, "\nFile: `" .. item .. "`\n```\n" .. data .. "\n```\n")
-        end
-      end
-    end
-  end
-
-  local initial_sections = {
-    "### Basic Prompt Instructions:\n",
-    conf.initial_prompt .. "\n\n\n",
-    "### User Instructions:\n",
-    user_input .. "\n\n\n",
-    "### Context/Data:\n",
-    "Project name: " .. (conf.project_name or "") .. "\n",
-    "Project Structure:\n",
-    project_structure,
-    table.concat(included_sections, "\n")
-  }
-  local prompt = table.concat(initial_sections, "\n")
-
-  local token_limit = conf.token_limit or 8000
-  local token_count = estimate_tokens(prompt)
-  if conf.debug then
-    vim.api.nvim_out_write("[chatgpt_nvim:init] Prompt token count: " .. token_count .. "\n")
-  end
-  if token_count > token_limit then
-    print("Too many files in project structure. The request exceeds the O1 model limit of " .. token_limit .. " tokens.")
-    return
-  end
-
-  copy_to_clipboard(prompt)
-  print("Prompt (requesting needed files) copied to clipboard! Paste it into the ChatGPT O1 model.")
+  -- Create a new scratch buffer for user to type the message
+  local bufnr = vim.api.nvim_create_buf(false, true)
+  vim.api.nvim_buf_set_name(bufnr, "ChatGPT_Prompt")
+  vim.api.nvim_buf_set_option(bufnr, 'bufhidden', 'wipe')
+  vim.api.nvim_buf_set_option(bufnr, 'filetype', 'markdown')
+  vim.api.nvim_buf_set_lines(bufnr, 0, -1, false, {
+    "# Enter your prompt below.",
+    "",
+    "Save & close with :wq to finalize your prompt."
+  })
+
+  local function on_write_post()
+    -- Read buffer lines and join them into a single string
+    local lines = vim.api.nvim_buf_get_lines(bufnr, 0, -1, false)
+    -- Skip the first lines that are placeholders, if desired
+    local user_input = table.concat(lines, "\n")
+    if user_input == "" or user_input:find("^# Enter your prompt below.") then
+      print("No valid input provided.")
+      return
+    end
+
+    -- Continue the same logic as originally, just using our new user_input
+    local dirs = conf.directories or {"."}
+    local project_structure = context.get_project_structure(dirs)
+
+    local initial_files = conf.initial_files or {}
+    local included_sections = {}
+
+    if #initial_files > 0 then
+      table.insert(included_sections, "\n\nIncluded files and directories (pre-selected):\n")
+      local root = vim.fn.getcwd()
+      for _, item in ipairs(initial_files) do
+        local full_path = root .. "/" .. item
+        if is_directory(full_path) then
+          local dir_files = context.get_project_files({item})
+          for _, f in ipairs(dir_files) do
+            local path = root .. "/" .. f
+            local data = read_file(path)
+            if data then
+              table.insert(included_sections, "\nFile: `" .. f .. "`\n```\n" .. data .. "\n```\n")
+            end
+          end
+        else
+          local data = read_file(full_path)
+          if data then
+            table.insert(included_sections, "\nFile: `" .. item .. "`\n```\n" .. data .. "\n```\n")
+          end
+        end
+      end
+    end
+
+    local initial_sections = {
+      "### Basic Prompt Instructions:\n",
+      conf.initial_prompt .. "\n\n\n",
+      "### User Instructions:\n",
+      user_input .. "\n\n\n",
+      "### Context/Data:\n",
+      "Project name: " .. (conf.project_name or "") .. "\n",
+      "Project Structure:\n",
+      project_structure,
+      table.concat(included_sections, "\n")
+    }
+    local prompt = table.concat(initial_sections, "\n")
+
+    local token_limit = conf.token_limit or 8000
+    local token_count = estimate_tokens(prompt)
+    if conf.debug then
+      vim.api.nvim_out_write("[chatgpt_nvim:init] Prompt token count: " .. token_count .. "\n")
+    end
+    if token_count > token_limit then
+      print("Too many files in project structure. The request exceeds the O1 model limit of " .. token_limit .. " tokens.")
+      return
+    end
+
+    copy_to_clipboard(prompt)
+    print("Prompt (requesting needed files) copied to clipboard! Paste it into the ChatGPT O1 model.")
+  end
+
+  -- Create an autocmd that triggers once when user saves the buffer (BufWritePost)
+  vim.api.nvim_create_autocmd("BufWritePost", {
+    buffer = bufnr,
+    once = true,
+    callback = on_write_post
+  })
+
+  -- Switch to the newly created buffer
+  vim.cmd("buffer " .. bufnr)
 end
 
 function M.run_chatgpt_paste_command()
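
Resulting flow, as sketched by the new code (the :ChatGPT command name is inferred from the debug message in the diff, not shown elsewhere in this hunk):

    :ChatGPT    opens the ChatGPT_Prompt scratch buffer with the placeholder text
                (type the prompt below the placeholder lines)
    :wq         per the placeholder instructions, saving fires the once-only
                BufWritePost autocmd, which assembles the prompt, enforces the
                token limit, and copies the result to the clipboard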