diff --git a/lua/chatgpt_nvim/init.lua b/lua/chatgpt_nvim/init.lua
index 83dec2a..c05e987 100644
--- a/lua/chatgpt_nvim/init.lua
+++ b/lua/chatgpt_nvim/init.lua
@@ -62,74 +62,118 @@ local function is_directory(path)
   return stat and stat.type == "directory"
 end
 
+-- Replaces the inline input() call with opening a new buffer for prompt input
 function M.run_chatgpt_command()
   local conf = config.load()
   if conf.debug then
     vim.api.nvim_out_write("[chatgpt_nvim:init] Running :ChatGPT command.\n")
   end
 
-  local user_input = vim.fn.input("Message for O1 Model: ")
-  if user_input == "" then
-    print("No input provided.")
-    return
-  end
-  local dirs = conf.directories or {"."}
-  local project_structure = context.get_project_structure(dirs)
-
-  local initial_files = conf.initial_files or {}
-  local included_sections = {}
-
-  if #initial_files > 0 then
-    table.insert(included_sections, "\n\nIncluded files and directories (pre-selected):\n")
-    local root = vim.fn.getcwd()
-    for _, item in ipairs(initial_files) do
-      local full_path = root .. "/" .. item
-      if is_directory(full_path) then
-        local dir_files = context.get_project_files({item})
-        for _, f in ipairs(dir_files) do
-          local path = root .. "/" .. f
-          local data = read_file(path)
-          if data then
-            table.insert(included_sections, "\nFile: `" .. f .. "`\n```\n" .. data .. "\n```\n")
-          end
-        end
-      else
-        local data = read_file(full_path)
-        if data then
-          table.insert(included_sections, "\nFile: `" .. item .. "`\n```\n" .. data .. "\n```\n")
-        end
-      end
-    end
-  end
-
-  local initial_sections = {
-    "### Basic Prompt Instructions:\n",
-    conf.initial_prompt .. "\n\n\n",
-    "### User Instructions:\n",
-    user_input .. "\n\n\n",
-    "### Context/Data:\n",
-    "Project name: " .. (conf.project_name or "") .. "\n",
-    "Project Structure:\n",
-    project_structure,
-    table.concat(included_sections, "\n")
-  }
-
-  local prompt = table.concat(initial_sections, "\n")
-
-  local token_limit = conf.token_limit or 8000
-  local token_count = estimate_tokens(prompt)
-
-  if conf.debug then
-    vim.api.nvim_out_write("[chatgpt_nvim:init] Prompt token count: " .. token_count .. "\n")
-  end
-
-  if token_count > token_limit then
-    print("Too many files in project structure. The request exceeds the O1 model limit of " .. token_limit .. " tokens.")
-    return
-  end
-
-  copy_to_clipboard(prompt)
-  print("Prompt (requesting needed files) copied to clipboard! Paste it into the ChatGPT O1 model.")
+  -- Create a new buffer for the user to type the message in. 'acwrite'
+  -- makes :w trigger the BufWriteCmd autocmd below; a plain scratch buffer
+  -- ('buftype' = 'nofile') cannot be written, so :wq would never fire it.
+  local bufnr = vim.api.nvim_create_buf(false, true)
+  vim.api.nvim_buf_set_name(bufnr, "ChatGPT_Prompt")
+  vim.api.nvim_buf_set_option(bufnr, 'buftype', 'acwrite')
+  vim.api.nvim_buf_set_option(bufnr, 'bufhidden', 'wipe')
+  vim.api.nvim_buf_set_option(bufnr, 'filetype', 'markdown')
+  local placeholder = {
+    "# Enter your prompt below.",
+    "",
+    "Save & close with :wq to finalize your prompt."
+  }
+  vim.api.nvim_buf_set_lines(bufnr, 0, -1, false, placeholder)
+
+  local function on_write_post()
+    -- Read the buffer, dropping the seeded placeholder lines so that only
+    -- the user's own text remains
+    local lines = vim.api.nvim_buf_get_lines(bufnr, 0, -1, false)
+    local input_lines = {}
+    for _, line in ipairs(lines) do
+      if line ~= placeholder[1] and line ~= placeholder[3] then
+        table.insert(input_lines, line)
+      end
+    end
+    local user_input = vim.trim(table.concat(input_lines, "\n"))
+
+    if user_input == "" then
+      print("No valid input provided.")
+      return
+    end
+
+    -- Continue with the same logic as before, now using the buffer contents
+    local dirs = conf.directories or {"."}
+    local project_structure = context.get_project_structure(dirs)
+
+    local initial_files = conf.initial_files or {}
+    local included_sections = {}
+
+    if #initial_files > 0 then
+      table.insert(included_sections, "\n\nIncluded files and directories (pre-selected):\n")
+      local root = vim.fn.getcwd()
+      for _, item in ipairs(initial_files) do
+        local full_path = root .. "/" .. item
+        if is_directory(full_path) then
+          local dir_files = context.get_project_files({item})
+          for _, f in ipairs(dir_files) do
+            local path = root .. "/" .. f
+            local data = read_file(path)
+            if data then
+              table.insert(included_sections, "\nFile: `" .. f .. "`\n```\n" .. data .. "\n```\n")
+            end
+          end
+        else
+          local data = read_file(full_path)
+          if data then
+            table.insert(included_sections, "\nFile: `" .. item .. "`\n```\n" .. data .. "\n```\n")
+          end
+        end
+      end
+    end
+
+    local initial_sections = {
+      "### Basic Prompt Instructions:\n",
+      conf.initial_prompt .. "\n\n\n",
+      "### User Instructions:\n",
+      user_input .. "\n\n\n",
+      "### Context/Data:\n",
+      "Project name: " .. (conf.project_name or "") .. "\n",
+      "Project Structure:\n",
+      project_structure,
+      table.concat(included_sections, "\n")
+    }
+
+    local prompt = table.concat(initial_sections, "\n")
+
+    local token_limit = conf.token_limit or 8000
+    local token_count = estimate_tokens(prompt)
+
+    if conf.debug then
+      vim.api.nvim_out_write("[chatgpt_nvim:init] Prompt token count: " .. token_count .. "\n")
+    end
+
+    if token_count > token_limit then
+      print("Too many files in project structure. The request exceeds the O1 model limit of " .. token_limit .. " tokens.")
+      return
+    end
+
+    copy_to_clipboard(prompt)
+    print("Prompt (requesting needed files) copied to clipboard! Paste it into the ChatGPT O1 model.")
+  end
+
+  -- BufWriteCmd fires once when the user runs :w on the 'acwrite' buffer;
+  -- clearing 'modified' lets the :q half of :wq succeed afterwards
+  vim.api.nvim_create_autocmd("BufWriteCmd", {
+    buffer = bufnr,
+    once = true,
+    callback = function()
+      vim.api.nvim_buf_set_option(bufnr, 'modified', false)
+      on_write_post()
+    end
+  })
+
+  -- Switch to the newly created buffer
+  vim.cmd("buffer " .. bufnr)
 end
 
 function M.run_chatgpt_paste_command()
@@ -253,4 +297,4 @@ function M.run_chatgpt_paste_command()
   end
 end
 
-return M
+return M
\ No newline at end of file