feat: add chunking, preview of changes, and partial acceptance
.chatgpt_config.yaml
@@ -1,9 +1,21 @@
project_name: "chatgpt_nvim"
default_prompt_blocks:
  - "basic-prompt"
  - "workflow-prompt"
directories:
  - "."
initial_files:
  - "README.md"
  - "lua"
debug: false

# New config flags:
preview_changes: true
interactive_file_selection: true
partial_acceptance: true
improved_debug: true

# We rely on token_limit to decide chunk sizes
token_limit: 3000

# Enable chunking if we exceed the token limit
enable_chunking: true

README.md
@@ -1,46 +1,66 @@
<!-- README.md -->
# ChatGPT NeoVim Plugin (Updated for YAML and Default Prompt Blocks)
# ChatGPT NeoVim Plugin (Extensively Updated with Chunking)

This plugin integrates a ChatGPT O1 model workflow into Neovim. It allows you to generate prompts containing:
- An **initial prompt** configured via a `.chatgpt_config.yaml` file in your project root.
- A list of directories (also specified in the `.chatgpt_config.yaml`) from which it gathers the complete project structure and file contents.
- The content of the current file you are editing and the project's `README.md`, if present.
- Prompts are combined and copied to your clipboard, ready to be pasted into ChatGPT O1.
This plugin integrates a ChatGPT O1 model workflow into Neovim. It allows you to:

The ChatGPT O1 model is then expected to return modifications in a **YAML** format. You will then use the `:ChatGPTPaste` command to read these changes from the clipboard and apply them to your files.
1. Generate prompts containing:
   - An **initial prompt** (from `.chatgpt_config.yaml`)
   - A list of directories (also specified in `.chatgpt_config.yaml`) from which it gathers the project structure and file contents
   - **Interactive file selection**, if enabled, so you can pick exactly which directories to include
   - Any **initial files** you define (e.g., `README.md`)

## Default Prompt Blocks
2. Copy these prompts to your clipboard to paste into ChatGPT O1.
3. Receive YAML changes from ChatGPT, then run `:ChatGPTPaste` to apply them or supply additional files.

The configuration now supports specifying one or more "default prompt blocks." These blocks allow you to quickly switch between different kinds of specialized instructions for ChatGPT. For example:
- **go-development**: Focused on Go (Golang) coding best practices, idiomatic patterns, and Go-specific guidance.
- **typo3-development**: Focused on TYPO3 development practices, TypoScript, and extension coding guidelines.
- **basic-prompt**: A generic prompt that explains how to return modifications in the required YAML structure.
## New Key Features

If you specify multiple blocks in `default_prompt_blocks`, their instructions are concatenated. This lets you combine multiple focus areas or start with a base prompt and add a specialized layer on top.
- **Chunking** (`enable_chunking: true`): If the combined prompt or file request is too large (exceeds `token_limit`), the plugin automatically splits it into multiple chunks. Each chunk is opened in its own buffer, and the first chunk is copied to your clipboard. You can paste them sequentially into ChatGPT to work around size limitations (a sketch of the token estimate used here follows this list).
- **Partial Acceptance**: If `partial_acceptance: true`, you can open a buffer that lists the final changes. Remove or comment out the lines you don’t want; only the remaining changes are applied.
- **Preview Changes**: If `preview_changes: true`, you get a buffer showing proposed changes before you apply them.
- **Interactive File Selection**: If `interactive_file_selection: true`, you choose which directories from `.chatgpt_config.yaml` get included in the prompt, reducing token usage.
- **Improved Debug**: If `improved_debug: true`, debug logs go into a dedicated `ChatGPT_Debug_Log` buffer for easier reading.
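
To make `token_limit` concrete: chunking is triggered by a rough token estimate, not an exact tokenizer count. Below is a minimal sketch of the two heuristics the plugin chooses between (character-based by default, word-based when `improved_debug` is enabled); treat the numbers as approximations:

```lua
-- Sketch of the token estimates that decide when chunking kicks in.
-- These mirror the heuristics in init.lua; they are approximations,
-- not real tokenizer counts.
local function estimate_tokens_basic(text)
  -- ~4 characters per token
  return math.floor(#text / 4)
end

local function estimate_tokens_improved(text)
  -- ~0.75 tokens per whitespace-separated word
  local words = #vim.split(text, "%s+")
  return math.floor(words * 0.75)
end

-- With token_limit = 3000, a prompt estimated above 3000 tokens is split
-- into chunks whenever enable_chunking is true.
```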

## Example Configuration

Below is an example `.chatgpt_config.yaml` that sets a custom initial prompt, limits directories to scan, and selects default prompt blocks:
## Example `.chatgpt_config.yaml`

```yaml
initial_prompt: "You are a coding assistant who will provide modifications in YAML form."
directories:
  - "lua"
  - "plugin"
project_name: "chatgpt_nvim"
default_prompt_blocks:
  - "basic-prompt"
  - "go-development"
  - "workflow-prompt"
directories:
  - "."
initial_files:
  - "README.md"
debug: false
enable_chunking: true
preview_changes: true
interactive_file_selection: true
partial_acceptance: true
improved_debug: true
token_limit: 3000
```

With this configuration:
- The prompt shown to ChatGPT will include both the "basic-prompt" and "go-development" instructions, as well as the user’s message (see the sketch after this list).
- Only the `lua` and `plugin` directories of your project will be included in the prompt.
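
For reference, a minimal sketch of how the selected `default_prompt_blocks` are merged into the initial prompt; the block texts here are shortened placeholders, the real ones live in `config.lua`:

```lua
-- Simplified sketch: selected prompt blocks are concatenated and appended
-- to initial_prompt, separated by blank lines.
local prompt_blocks = {
  ["basic-prompt"] = "Return modifications in the required YAML structure...",   -- shortened placeholder
  ["go-development"] = "Focus on idiomatic Go coding best practices...",         -- shortened placeholder
}

local config = {
  initial_prompt = "You are a coding assistant who will provide modifications in YAML form.",
  default_prompt_blocks = { "basic-prompt", "go-development" },
}

local merged_prompt = {}
for _, name in ipairs(config.default_prompt_blocks) do
  if prompt_blocks[name] then
    table.insert(merged_prompt, prompt_blocks[name])
  end
end

if #merged_prompt > 0 then
  local combined_blocks = table.concat(merged_prompt, "\n\n")
  if config.initial_prompt ~= "" then
    config.initial_prompt = config.initial_prompt .. "\n\n" .. combined_blocks
  else
    config.initial_prompt = combined_blocks
  end
end
```
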
## Usage

1. Run `:ChatGPT` and enter your message. The combined prompt is copied to your clipboard.
2. Paste it into the ChatGPT O1 model.
3. Copy the YAML response from ChatGPT into your clipboard.
4. Run `:ChatGPTPaste` to apply the changes to your files.
1. **`:ChatGPT`**
   - If `interactive_file_selection` is on, you’ll pick directories to include.
   - A buffer `ChatGPT_Prompt.md` opens where you type your instructions.
   - Save & close with `:wq` → if `enable_chunking` is on and the prompt exceeds `token_limit`, it’s split into multiple chunks. Each chunk is opened in a buffer, and the first one is copied to your clipboard.

This workflow allows you to iterate quickly, making changes guided by AI while keeping a clear history and a structured approach.
2. **Paste Prompt to ChatGPT**
   - If multiple chunks exist, copy/paste them one by one into ChatGPT.

3. **`:ChatGPTPaste`**
   - The plugin reads the YAML from your clipboard. If it requests more files, the plugin may chunk that request too if it is large.
   - If final changes are provided:
     - Optionally preview them (`preview_changes`).
     - Optionally partially accept them (`partial_acceptance`).
     - Then the plugin writes/deletes files as specified (a sketch of the parsed YAML structure follows this section).
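
To illustrate what `:ChatGPTPaste` works with: after the clipboard YAML is parsed with `lyaml`, the plugin expects a table shaped roughly like the sketch below (the paths and contents are placeholders):

```lua
-- Sketch of the parsed response that :ChatGPTPaste iterates over.
-- Each entry needs a path plus either content (write) or delete = true.
local data = {
  project_name = "chatgpt_nvim",
  files = {
    { path = "lua/chatgpt_nvim/example.lua", content = "local M = {}\n\nreturn M\n" }, -- placeholder
    { path = "lua/chatgpt_nvim/obsolete.lua", delete = true },                         -- placeholder
  },
}

for _, fileinfo in ipairs(data.files) do
  if fileinfo.delete then
    print("would delete: " .. fileinfo.path)
  elseif fileinfo.content then
    print("would write: " .. fileinfo.path)
  end
end
```
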
## Troubleshooting & Tips
- Adjust `token_limit` in `.chatgpt_config.yaml` as needed.
- If partial acceptance is confusing, remember to remove or prepend `#` to lines you don’t want before saving and closing the buffer.
- If chunking occurs, ensure you copy/paste **all chunks** into ChatGPT in the correct order.
- Check `ChatGPT_Debug_Log` if `improved_debug` is on, or the Neovim messages if `debug` is on, for detailed info.

Enjoy your improved, more flexible ChatGPT Neovim plugin with chunking support!

lua/chatgpt_nvim/config.lua
@@ -28,7 +28,7 @@ local prompt_blocks = {
project_name: example_project
files:
  - path: "relative/path/to/file"
3. If more information or context is needed, ask the user (outside of the YAML) to provide that. For example:
3. If more information or context is needed, ask the user (outside of the YAML) to provide that.
4. When all necessary information is gathered, provide the final YAML with the
   project's name and a list of files to be created or modified.
   Also explain the changes you made below the YAML.
@@ -46,6 +46,14 @@ local prompt_blocks = {
As you generate code or provide guidance, you must consider the security impact of every decision.
You will write and review code with a focus on minimizing vulnerabilities and following best security practices,
such as validating all user inputs, avoiding unsafe libraries or functions, and following secure coding standards.
]],
  ["workflow-prompt"] = [[
You are a coding assistant focusing on making the Neovim ChatGPT workflow straightforward and user-friendly.
Provide a concise set of steps or guidance, reminding the user:
- How to list needed files for further context
- How to request additional information outside of the YAML
- How to finalize changes with a YAML response containing project_name and files
Always ensure that prompts and explanations remain clear and minimal, reducing user errors.
]]
}

@@ -88,7 +96,14 @@ function M.load()
    token_limit = 128000,
    project_name = "",
    debug = false,
    initial_files = {}
    initial_files = {},

    -- Additional new config flags
    preview_changes = false,
    interactive_file_selection = false,
    partial_acceptance = false,
    improved_debug = false,
    enable_chunking = false
  }

  if fd then
@@ -119,10 +134,25 @@ function M.load()
        if type(result.initial_files) == "table" then
          config.initial_files = result.initial_files
        end
        if type(result.preview_changes) == "boolean" then
          config.preview_changes = result.preview_changes
        end
        if type(result.interactive_file_selection) == "boolean" then
          config.interactive_file_selection = result.interactive_file_selection
        end
        if type(result.partial_acceptance) == "boolean" then
          config.partial_acceptance = result.partial_acceptance
        end
        if type(result.improved_debug) == "boolean" then
          config.improved_debug = result.improved_debug
        end
        if type(result.enable_chunking) == "boolean" then
          config.enable_chunking = result.enable_chunking
        end
      end
    end
  else
    config.initial_prompt = "You are a coding assistant who receives a project's context and user instructions. You will guide the user through a workflow:\n1. First, ask the user which files are needed from the project to understand and perform the coding task.\n2. If more information or context is needed, ask for it outside of the YAML.\n3. Once all information is obtained, return the final YAML with the project_name and the files to be created/modified or deleted.\n\nThe final YAML must have:\nproject_name: <project_name>\nfiles:\n  - path: \"relative/path/to/file\"\n    content: |\n      <file content>\n  - path: \"relative/path/to/other_file\"\n    delete: true\n\nIf more context is needed at any step before providing the final YAML, request it outside of the YAML."
    config.initial_prompt = "You are a coding assistant who receives a project's context and user instructions..."
  end

  -- Merge the default prompt blocks with the config's initial prompt
@@ -135,7 +165,6 @@ function M.load()
  end
  if #merged_prompt > 0 then
    local combined_blocks = table.concat(merged_prompt, "\n\n")
    -- We now *append* or combine the existing initial_prompt with the default blocks
    if config.initial_prompt ~= "" then
      config.initial_prompt = config.initial_prompt .. "\n\n" .. combined_blocks
    else

lua/chatgpt_nvim/init.lua
@@ -3,6 +3,7 @@ local M = {}
local context = require('chatgpt_nvim.context')
local handler = require('chatgpt_nvim.handler')
local config = require('chatgpt_nvim.config')
local ui = require('chatgpt_nvim.ui')

local ok_yaml, lyaml = pcall(require, "lyaml")

@@ -19,23 +20,13 @@ local function parse_response(raw)
  local ok, data = pcall(lyaml.load, raw)
  if not ok or not data then
    vim.api.nvim_err_writeln("Failed to parse YAML response.")
    if conf.debug then
      vim.api.nvim_out_write("[chatgpt_nvim:init] RAW response that failed parsing:\n" .. raw .. "\n")
    end
    ui.debug_log("RAW response that failed parsing:\n" .. raw)
    return nil
  end
  if conf.debug then
    vim.api.nvim_out_write("[chatgpt_nvim:init] Successfully parsed YAML response.\n")
  end
  ui.debug_log("Successfully parsed YAML response.")
  return data
end

local function estimate_tokens(text)
  local approx_chars_per_token = 4
  local length = #text
  return math.floor(length / approx_chars_per_token)
end

local function is_subpath(root, path)
  local root_abs = vim.fn.fnamemodify(root, ":p")
  local target_abs = vim.fn.fnamemodify(path, ":p")
@@ -62,74 +53,256 @@ local function is_directory(path)
  return stat and stat.type == "directory"
end

---------------------------------------------------------------------------
-- Updated run_chatgpt_command using BufWriteCmd to avoid creating a file --
---------------------------------------------------------------------------
function M.run_chatgpt_command()
-- We'll create two token estimate functions: one basic, one improved
local function estimate_tokens_basic(text)
  local approx_chars_per_token = 4
  local length = #text
  return math.floor(length / approx_chars_per_token)
end

local function estimate_tokens_improved(text)
  -- Word-based approach, assume ~0.75 token/word
  local words = #vim.split(text, "%s+")
  local approximate_tokens = math.floor(words * 0.75)
  ui.debug_log("Using improved token estimate: " .. approximate_tokens .. " tokens")
  return approximate_tokens
end

local function get_estimate_fn()
  local conf = config.load()
  if conf.debug then
    vim.api.nvim_out_write("[chatgpt_nvim:init] Running :ChatGPT command.\n")
  if conf.improved_debug then
    return estimate_tokens_improved
  else
    return estimate_tokens_basic
  end
end

-----------------------------------------------------------------------------
-- CHUNKING
-----------------------------------------------------------------------------
-- If chunking is enabled and we exceed the token limit, we split the prompt
-- into multiple chunks. We then copy each chunk in turn to the clipboard.
-- The user can paste them one by one into ChatGPT. This is a naive approach,
-- but helps with extremely large requests.
-----------------------------------------------------------------------------
local function handle_chunking_if_needed(prompt, estimate_fn)
  local conf = config.load()
  local token_count = estimate_fn(prompt)
  if not conf.enable_chunking or token_count <= (conf.token_limit or 8000) then
    return { prompt }
  end

  local chunks = ui.chunkify(prompt, estimate_fn, conf.token_limit or 8000)
  return chunks
end

-- Show the user a preview buffer with the proposed changes (unchanged).
local function preview_changes(changes)
  local bufnr = vim.api.nvim_create_buf(false, true)
  vim.api.nvim_buf_set_name(bufnr, "ChatGPT_Changes_Preview")
  vim.api.nvim_buf_set_option(bufnr, "filetype", "diff")
  vim.api.nvim_buf_set_lines(bufnr, 0, -1, false, {
    "# Preview of Changes:",
    "# (Close this window to apply changes or run :q to cancel)",
    ""
  })
  for _, fileinfo in ipairs(changes) do
    local indicator = (fileinfo.delete == true) and "Delete file" or "Write file"
    vim.api.nvim_buf_set_lines(bufnr, -1, -1, false, {
      string.format("=== %s: %s ===", indicator, fileinfo.path or "<no path>")
    })
    if fileinfo.content then
      local lines = vim.split(fileinfo.content, "\n")
      for _, line in ipairs(lines) do
        vim.api.nvim_buf_set_lines(bufnr, -1, -1, false, { line })
      end
    end
    vim.api.nvim_buf_set_lines(bufnr, -1, -1, false, { "" })
  end

  vim.cmd("vsplit")
  vim.cmd("buffer " .. bufnr)
end

-- Minimal partial acceptance from previous example
local function partial_accept(changes)
  local bufnr = vim.api.nvim_create_buf(false, true)
  vim.api.nvim_buf_set_name(bufnr, "ChatGPT_Partial_Accept")
  vim.api.nvim_buf_set_option(bufnr, "filetype", "diff")

  local lines = {
    "# Remove or comment out (prepend '#') any changes you do NOT want, then :wq to finalize partial acceptance",
    ""
  }
  for _, fileinfo in ipairs(changes) do
    local action = (fileinfo.delete == true) and "[DELETE]" or "[WRITE]"
    table.insert(lines, string.format("%s %s", action, fileinfo.path or "<no path>"))
    if fileinfo.content then
      local content_lines = vim.split(fileinfo.content, "\n")
      for _, cl in ipairs(content_lines) do
        table.insert(lines, " " .. cl)
      end
    end
    table.insert(lines, "")
  end

  vim.api.nvim_buf_set_lines(bufnr, 0, -1, false, lines)

  local final_changes = {}
  local function on_write()
    local edited_lines = vim.api.nvim_buf_get_lines(bufnr, 0, -1, false)
    local keep_current = false
    local current_fileinfo = { path = nil, content = nil, delete = false }
    local content_accum = {}

    for _, line in ipairs(edited_lines) do
      if line:match("^#") or line == "" then
        goto continue
      end
      local del_match = line:match("^%[DELETE%] (.+)")
      local write_match = line:match("^%[WRITE%] (.+)")
      if del_match then
        if keep_current and (current_fileinfo.path ~= nil) then
          if #content_accum > 0 then
            current_fileinfo.content = table.concat(content_accum, "\n")
          end
          table.insert(final_changes, current_fileinfo)
        end
        keep_current = true
        current_fileinfo = { path = del_match, delete = true, content = nil }
        content_accum = {}
      elseif write_match then
        if keep_current and (current_fileinfo.path ~= nil) then
          if #content_accum > 0 then
            current_fileinfo.content = table.concat(content_accum, "\n")
          end
          table.insert(final_changes, current_fileinfo)
        end
        keep_current = true
        current_fileinfo = { path = write_match, delete = false, content = nil }
        content_accum = {}
      else
        if keep_current then
          -- parenthesize gsub so only the trimmed string (not the match count) is inserted
          table.insert(content_accum, (line:gsub("^%s*", "")))
        end
      end
      ::continue::
    end

    if keep_current and (current_fileinfo.path ~= nil) then
      if #content_accum > 0 then
        current_fileinfo.content = table.concat(content_accum, "\n")
      end
      table.insert(final_changes, current_fileinfo)
    end

    vim.api.nvim_buf_set_option(bufnr, "modified", false)
  end

  vim.api.nvim_create_autocmd("BufWriteCmd", {
    buffer = bufnr,
    once = true,
    callback = on_write
  })

  vim.cmd("split")
  vim.cmd("buffer " .. bufnr)

  vim.wait(60000, function()
    local winids = vim.api.nvim_tabpage_list_wins(0)
    for _, w in ipairs(winids) do
      local b = vim.api.nvim_win_get_buf(w)
      if b == bufnr then
        return false
      end
    end
    return true
  end)

  return final_changes
end

-- Utility to store generated prompt in a scratch buffer
local function store_prompt_for_reference(prompt)
  local bufnr = vim.api.nvim_create_buf(false, true)
  vim.api.nvim_buf_set_name(bufnr, "ChatGPT_Generated_Prompt")
  vim.api.nvim_buf_set_option(bufnr, "filetype", "markdown")

  local lines = {
    "# Below is the generated prompt. You can keep it for reference:",
    ""
  }
  local prompt_lines = vim.split(prompt, "\n")
  for _, line in ipairs(prompt_lines) do
    table.insert(lines, line)
  end

  vim.api.nvim_buf_set_lines(bufnr, 0, -1, false, lines)
  vim.cmd("vsplit")
  vim.cmd("buffer " .. bufnr)
end

----------------------------------------------------------------------------
-- run_chatgpt_command
----------------------------------------------------------------------------
function M.run_chatgpt_command()
  local conf = config.load()
  ui.debug_log("Running :ChatGPT command.")

  -- Possibly let user select directories if interactive_file_selection is true
  local dirs = conf.directories or {"."}
  if conf.interactive_file_selection then
    dirs = ui.pick_directories(dirs)
    if #dirs == 0 then
      dirs = conf.directories
    end
  end

  -- Create a normal, listed buffer so :w / :wq will work
  local bufnr = vim.api.nvim_create_buf(false, false)
  -- Assign a filename so Vim treats it like a normal file, but we intercept writes
  vim.api.nvim_buf_set_name(bufnr, "ChatGPT_Prompt.md")
  vim.api.nvim_buf_set_option(bufnr, "filetype", "markdown")
  vim.api.nvim_buf_set_option(bufnr, "bufhidden", "wipe")
  vim.api.nvim_buf_set_option(bufnr, "buftype", "")
  vim.api.nvim_buf_set_option(bufnr, "modifiable", true)

  -- Set some initial placeholder lines
  vim.api.nvim_buf_set_lines(bufnr, 0, -1, false, {
    "# Enter your prompt below.",
    "",
    "Save & close with :wq to finalize your prompt."
  })

  -- Intercept the write so that no file is actually created on disk
  vim.api.nvim_create_autocmd("BufWriteCmd", {
    buffer = bufnr,
    callback = function()
      -- Gather lines
      local lines = vim.api.nvim_buf_get_lines(bufnr, 0, -1, false)
      local user_input = table.concat(lines, "\n")

      -- Basic check to ensure user actually wrote something
      if user_input == "" or user_input:find("^# Enter your prompt below.") then
        vim.api.nvim_out_write("No valid input provided.\n")
        -- Mark buffer as unmodified, so :wq can still exit
        vim.api.nvim_buf_set_option(bufnr, "modified", false)
        return
      end

      -- Build the prompt using the user_input
      local dirs = conf.directories or {"."}
      local project_structure = context.get_project_structure(dirs)

      local initial_files = conf.initial_files or {}
      local included_sections = {}

      if #initial_files > 0 then
        table.insert(included_sections, "\n\nIncluded files and directories (pre-selected):\n")
        for _, item in ipairs(initial_files) do
        local root = vim.fn.getcwd()
        for _, item in ipairs(initial_files) do
          local full_path = root .. "/" .. item
          if is_directory(full_path) then
            local dir_files = context.get_project_files({item})
            for _, f in ipairs(dir_files) do
              local path = root .. "/" .. f
              local data = read_file(path)
              if data then
                table.insert(included_sections, "\nFile: `" .. f .. "`\n```\n" .. data .. "\n```\n")
              end
            end
          else
            local data = read_file(full_path)
          local full_path = root .. "/" .. item
          if is_directory(full_path) then
            local dir_files = context.get_project_files({item})
            for _, f in ipairs(dir_files) do
              local path = root .. "/" .. f
              local data = read_file(path)
              if data then
                table.insert(included_sections, "\nFile: `" .. item .. "`\n```\n" .. data .. "\n```\n")
                table.insert(included_sections, "\nFile: `" .. f .. "`\n```\n" .. data .. "\n```\n")
              end
            end
          else
            local data = read_file(full_path)
            if data then
              table.insert(included_sections, "\nFile: `" .. item .. "`\n```\n" .. data .. "\n```\n")
            end
          end
        end

@@ -146,36 +319,50 @@ function M.run_chatgpt_command()
      }

      local prompt = table.concat(initial_sections, "\n")
      store_prompt_for_reference(prompt)

      local token_limit = conf.token_limit or 8000
      local token_count = estimate_tokens(prompt)
      local estimate_fn = get_estimate_fn()
      local chunks = handle_chunking_if_needed(prompt, estimate_fn)

      if conf.debug then
        vim.api.nvim_out_write("[chatgpt_nvim:init] Prompt token count: " .. token_count .. "\n")
      end

      if token_count > token_limit then
        vim.api.nvim_out_write("Too many files in project structure. The request exceeds the O1 model limit of " .. token_limit .. " tokens.\n")
      if #chunks == 1 then
        -- Single chunk, just copy as normal
        copy_to_clipboard(chunks[1])
        vim.api.nvim_out_write("Prompt copied to clipboard! Paste into ChatGPT.\n")
      else
        copy_to_clipboard(prompt)
        vim.api.nvim_out_write("Prompt (requesting needed files) copied to clipboard! Paste it into the ChatGPT O1 model.\n")
        -- Multiple chunks. We'll store them in separate scratch buffers and also copy the first chunk
        for i, chunk in ipairs(chunks) do
          local cbuf = vim.api.nvim_create_buf(false, true)
          vim.api.nvim_buf_set_name(cbuf, "ChatGPT_Generated_Chunk_" .. i)
          vim.api.nvim_buf_set_option(cbuf, "filetype", "markdown")

          local lines = {
            "# Chunk " .. i .. " of " .. #chunks .. ":",
            "# Copy/paste this chunk into ChatGPT, then come back and copy next chunk as needed.",
            ""
          }
          vim.list_extend(lines, vim.split(chunk, "\n"))
          vim.api.nvim_buf_set_lines(cbuf, 0, -1, false, lines)

          if i == 1 then
            copy_to_clipboard(chunk)
            vim.api.nvim_out_write("Copied chunk #1 of " .. #chunks .. " to clipboard!\n")
          end
        end
      end

      -- Mark as unmodified so :wq won't complain
      vim.api.nvim_buf_set_option(bufnr, "modified", false)
    end,
  end
  })

  -- Switch to the newly created buffer
  vim.cmd("buffer " .. bufnr)
end
---------------------------------------------------------------------------

----------------------------------------------------------------------------
-- run_chatgpt_paste_command
----------------------------------------------------------------------------
function M.run_chatgpt_paste_command()
  local conf = config.load()
  if conf.debug then
    vim.api.nvim_out_write("[chatgpt_nvim:init] Running :ChatGPTPaste command.\n")
  end
  ui.debug_log("Running :ChatGPTPaste command.")
  print("Reading ChatGPT YAML response from clipboard...")
  local raw = handler.get_clipboard_content()
  if raw == "" then
@@ -189,9 +376,12 @@ function M.run_chatgpt_paste_command()
  end

  if data.project_name and data.files then
    if conf.debug then
      vim.api.nvim_out_write("[chatgpt_nvim:init] Received project_name and files from response.\n")
    ui.debug_log("Received project_name and files in response.")
    if data.project_name ~= conf.project_name then
      vim.api.nvim_err_writeln("Project name mismatch. Aborting.")
      return
    end

    local is_final = false
    for _, fileinfo in ipairs(data.files) do
      if fileinfo.content or fileinfo.delete == true then
@@ -201,47 +391,62 @@ function M.run_chatgpt_paste_command()
    end

    if is_final then
      if data.project_name ~= conf.project_name then
        vim.api.nvim_err_writeln("Project name mismatch. The provided changes are for project '" ..
          (data.project_name or "unknown") .. "' but current project is '" ..
          (conf.project_name or "unconfigured") .. "'. Aborting changes.")
        return
      if conf.preview_changes then
        preview_changes(data.files)
        print("Close the preview window to apply changes, or leave it open and use :q to cancel.")
        local closed = vim.wait(60000, function()
          local bufs = vim.api.nvim_list_bufs()
          for _, b in ipairs(bufs) do
            local name = vim.api.nvim_buf_get_name(b)
            if name:match("ChatGPT_Changes_Preview$") then
              return false
            end
          end
          return true
        end)
        if not closed then
          vim.api.nvim_err_writeln("Preview not closed in time. Aborting.")
          return
        end
      end

      local final_files = data.files
      if conf.partial_acceptance then
        final_files = partial_accept(data.files)
        if #final_files == 0 then
          vim.api.nvim_err_writeln("No changes remain after partial acceptance. Aborting.")
          return
        end
      end

      local root = vim.fn.getcwd()

      for _, fileinfo in ipairs(data.files) do
      for _, fileinfo in ipairs(final_files) do
        if not fileinfo.path then
          vim.api.nvim_err_writeln("Invalid file entry. Must have 'path'.")
          goto continue
        end

        if not is_subpath(root, fileinfo.path) then
          vim.api.nvim_err_writeln("Invalid file path outside project root: " .. fileinfo.path)
          vim.api.nvim_err_writeln("Invalid path outside project root: " .. fileinfo.path)
          goto continue
        end

        if fileinfo.delete == true then
          if conf.debug then
            vim.api.nvim_out_write("[chatgpt_nvim:init] Deleting file: " .. fileinfo.path .. "\n")
          end
          ui.debug_log("Deleting file: " .. fileinfo.path)
          handler.delete_file(fileinfo.path)
          print("Deleted file: " .. fileinfo.path)
          print("Deleted: " .. fileinfo.path)
        elseif fileinfo.content then
          if conf.debug then
            vim.api.nvim_out_write("[chatgpt_nvim:init] Writing file: " .. fileinfo.path .. "\n")
          end
          ui.debug_log("Writing file: " .. fileinfo.path)
          handler.write_file(fileinfo.path, fileinfo.content)
          print("Wrote file: " .. fileinfo.path)
          print("Wrote: " .. fileinfo.path)
        else
          vim.api.nvim_err_writeln("Invalid file entry. Must have 'content' or 'delete' set to true for final changes.")
          vim.api.nvim_err_writeln("Invalid file entry. Must have 'content' or 'delete'.")
        end
        ::continue::
      end

      return
    else
      local dirs = conf.directories or {"."}
      -- Intermediate request for more files
      local requested_paths = {}
      for _, fileinfo in ipairs(data.files) do
        if fileinfo.path then
@@ -251,7 +456,6 @@ function M.run_chatgpt_paste_command()

      local file_sections = {}
      local root = vim.fn.getcwd()

      for _, f in ipairs(requested_paths) do
        local path = root .. "/" .. f
        local content = read_file(path)
@@ -263,7 +467,7 @@ function M.run_chatgpt_paste_command()
      end

      local sections = {
        conf.initial_prompt,
        config.load().initial_prompt,
        "\n\nProject name: " .. (conf.project_name or ""),
        "\n\nBelow are the requested files from the project, each preceded by its filename in backticks and enclosed in triple backticks.\n",
        table.concat(file_sections, "\n"),
@@ -271,16 +475,33 @@ function M.run_chatgpt_paste_command()
      }

      local prompt = table.concat(sections, "\n")
      local estimate_fn = get_estimate_fn()

      local token_limit = conf.token_limit or 8000
      local token_count = estimate_tokens(prompt)
      local token_count = estimate_fn(prompt)
      ui.debug_log("Returning requested files. Token count: " .. token_count)

      if conf.debug then
        vim.api.nvim_out_write("[chatgpt_nvim:init] Returning requested files. Token count: " .. token_count .. "\n")
      end
      if token_count > (conf.token_limit or 8000) then
        if conf.enable_chunking then
          local chunks = ui.chunkify(prompt, estimate_fn, conf.token_limit or 8000)
          for i, chunk in ipairs(chunks) do
            local cbuf = vim.api.nvim_create_buf(false, true)
            vim.api.nvim_buf_set_name(cbuf, "ChatGPT_Requested_Files_Chunk_" .. i)
            local lines = {
              "# Chunk " .. i .. " of " .. #chunks .. ":",
              "# Copy/paste this chunk into ChatGPT, then come back and copy next chunk as needed.",
              ""
            }
            vim.list_extend(lines, vim.split(chunk, "\n"))
            vim.api.nvim_buf_set_lines(cbuf, 0, -1, false, lines)

      if token_count > token_limit then
        vim.api.nvim_err_writeln("Too many requested files. Exceeds token limit.")
            if i == 1 then
              copy_to_clipboard(chunk)
              print("Copied chunk #1 of " .. #chunks .. " to clipboard!")
            end
          end
        else
          vim.api.nvim_err_writeln("Too many requested files. Exceeds token limit.")
        end
        return
      end

@@ -292,4 +513,4 @@ function M.run_chatgpt_paste_command()
    end
  end

return M
return M

lua/chatgpt_nvim/ui.lua (new file)
@@ -0,0 +1,99 @@
local M = {}
local config = require('chatgpt_nvim.config')
local conf = config.load()

local debug_bufnr = nil
if conf.improved_debug then
  debug_bufnr = vim.api.nvim_create_buf(false, true)
  vim.api.nvim_buf_set_name(debug_bufnr, "ChatGPT_Debug_Log")
  vim.api.nvim_buf_set_option(debug_bufnr, "filetype", "log")
end

function M.debug_log(msg)
  if conf.improved_debug and debug_bufnr then
    vim.api.nvim_buf_set_lines(debug_bufnr, -1, -1, false, { msg })
  else
    if conf.debug then
      vim.api.nvim_out_write("[chatgpt_nvim:debug] " .. msg .. "\n")
    end
  end
end

function M.pick_directories(dirs)
  local selected_dirs = {}
  local lines = { "Delete lines for directories you do NOT want, then :wq" }
  for _, d in ipairs(dirs) do
    table.insert(lines, d)
  end

  local bufnr = vim.api.nvim_create_buf(false, false)
  vim.api.nvim_buf_set_name(bufnr, "ChatGPT_File_Selection")
  vim.api.nvim_buf_set_option(bufnr, "filetype", "markdown")
  vim.api.nvim_buf_set_option(bufnr, "bufhidden", "wipe")
  vim.api.nvim_buf_set_lines(bufnr, 0, -1, false, lines)

  local function on_write()
    local new_lines = vim.api.nvim_buf_get_lines(bufnr, 0, -1, false)
    local final_dirs = {}
    for _, l in ipairs(new_lines) do
      if l ~= "" and not l:match("^Delete lines") and not l:match("^#") then
        table.insert(final_dirs, l)
      end
    end
    selected_dirs = final_dirs
    vim.api.nvim_buf_set_option(bufnr, "modified", false)
  end

  vim.api.nvim_create_autocmd("BufWriteCmd", {
    buffer = bufnr,
    once = true,
    callback = on_write
  })

  vim.cmd("split")
  vim.cmd("buffer " .. bufnr)

  -- Wait up to 30s for user to close
  vim.wait(30000, function()
    local winids = vim.api.nvim_tabpage_list_wins(0)
    for _, w in ipairs(winids) do
      local b = vim.api.nvim_win_get_buf(w)
      if b == bufnr then
        return false
      end
    end
    return true
  end)

  return selected_dirs
end

-- A function to chunk a long string if it exceeds token_limit
-- We'll just do rough splits by lines or paragraphs.
function M.chunkify(text, estimate_tokens_fn, token_limit)
  local lines = vim.split(text, "\n")
  local chunks = {}
  local current_chunk = {}
  local current_text = ""

  for _, line in ipairs(lines) do
    local test_text = (current_text == "") and line or (current_text .. "\n" .. line)
    local est_tokens = estimate_tokens_fn(test_text)
    if est_tokens > token_limit then
      -- push current chunk
      table.insert(chunks, current_text)
      -- start a new chunk
      current_text = line
    else
      current_text = test_text
    end
  end

  if current_text ~= "" then
    table.insert(chunks, current_text)
  end

  return chunks
end

return M
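
For reference, a minimal sketch of how `M.chunkify` is driven (mirroring the call made in `init.lua`, using the basic chars-per-token heuristic and an assumed `token_limit` of 3000):

```lua
-- Minimal usage sketch for M.chunkify.
local ui = require('chatgpt_nvim.ui')

local function estimate_tokens(text)
  -- basic chars-per-token heuristic, as in init.lua
  return math.floor(#text / 4)
end

local prompt = "...a long generated prompt..."
local chunks = ui.chunkify(prompt, estimate_tokens, 3000)
for i, chunk in ipairs(chunks) do
  print(("chunk %d of %d: ~%d tokens"):format(i, #chunks, estimate_tokens(chunk)))
end
```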