feat: add debug logging

commit e97aa81d8f
parent 314a65a203
date   2024-12-14 12:48:16 +01:00
5 changed files with 104 additions and 52 deletions

lua/chatgpt_nvim/init.lua

@@ -1,4 +1,3 @@
--- lua/chatgpt_nvim/init.lua
 local M = {}
 local context = require('chatgpt_nvim.context')
@@ -12,6 +11,7 @@ local function copy_to_clipboard(text)
 end
 local function parse_response(raw)
+  local conf = config.load()
   if not ok_yaml then
     vim.api.nvim_err_writeln("lyaml not available. Install with `luarocks install lyaml`.")
     return nil
@@ -19,8 +19,14 @@ local function parse_response(raw)
   local ok, data = pcall(lyaml.load, raw)
   if not ok or not data then
     vim.api.nvim_err_writeln("Failed to parse YAML response.")
+    if conf.debug then
+      vim.api.nvim_out_write("[chatgpt_nvim:init] RAW response that failed parsing:\n" .. raw .. "\n")
+    end
     return nil
   end
+  if conf.debug then
+    vim.api.nvim_out_write("[chatgpt_nvim:init] Successfully parsed YAML response.\n")
+  end
   return data
 end
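Note: the `if conf.debug then ... end` guard introduced here recurs before every log line in this commit. A small helper could collapse the pattern. A minimal sketch, assuming the config module lives at `chatgpt_nvim.config` (the require is not shown in this diff) and that `load()` returns a table with a boolean `debug` field, as the surrounding code implies:

local config = require('chatgpt_nvim.config')  -- assumed module path

-- Write a prefixed debug line only when debug logging is enabled.
local function debug_log(msg)
  local conf = config.load()
  if conf.debug then
    vim.api.nvim_out_write("[chatgpt_nvim:init] " .. msg .. "\n")
  end
end

-- e.g. debug_log("Successfully parsed YAML response.")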
@@ -37,32 +43,14 @@ local function is_subpath(root, path)
   return target_abs:sub(1, #root_abs) == root_abs
 end
--- The improved workflow:
---
--- :ChatGPT:
--- 1) Create a prompt that includes:
---    - The project's name and structure, but NOT file contents.
---    - The initial instructions, telling the O1 Model to respond with a YAML listing of the files it needs.
--- 2) Copy that prompt to clipboard.
---
--- The O1 Model responds with a YAML listing the files it needs.
---
--- :ChatGPTPaste:
--- 1) Parse the YAML from clipboard.
--- 2) If the YAML only lists needed files (no final changes), generate a new prompt that includes the requested files' content, plus a reminder that the O1 Model can ask for more files.
--- 3) Copy that prompt to clipboard for the O1 Model.
---
--- The O1 Model can continue to ask for more files. Each time we run :ChatGPTPaste with the new YAML, we provide more files. Eventually, the O1 Model returns the final YAML with `project_name` and `files` changes.
---
--- When the final YAML arrives and its `project_name` matches the current project, we apply the `files` changes.
---
--- If more context is needed, we ask the user outside the YAML.
--- We'll store requested files in a global variable for now, though a more robust solution might be needed.
 local requested_files = {}
 function M.run_chatgpt_command()
   local conf = config.load()
+  if conf.debug then
+    vim.api.nvim_out_write("[chatgpt_nvim:init] Running :ChatGPT command.\n")
+  end
   local user_input = vim.fn.input("Message for O1 Model: ")
   if user_input == "" then
     print("No input provided.")
@@ -72,15 +60,6 @@ function M.run_chatgpt_command()
   local dirs = conf.directories or {"."}
   local project_structure = context.get_project_structure(dirs)
-  -- Initial prompt without file contents, asking the O1 model which files are needed.
-  -- We'll instruct the O1 Model to respond with a YAML that includes the files it needs.
-  -- The O1 model's response should look something like:
-  -- project_name: <project_name>
-  -- files:
-  --   - path: "path/to/needed_file"
-  --
-  -- No changes yet; we are only asking which files the model wants.
   local initial_sections = {
     conf.initial_prompt .. "\n" .. user_input,
     "\n\nProject name: " .. (conf.project_name or "") .. "\n",
@@ -95,6 +74,10 @@ function M.run_chatgpt_command()
   local token_limit = conf.token_limit or 8000
   local token_count = estimate_tokens(prompt)
+  if conf.debug then
+    vim.api.nvim_out_write("[chatgpt_nvim:init] Prompt token count: " .. token_count .. "\n")
+  end
   if token_count > token_limit then
     print("Too many files in project structure. The request exceeds the O1 model limit of " .. token_limit .. " tokens.")
     return
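`estimate_tokens` itself is not part of this diff. If it follows the common rough heuristic of about four characters per token, it would be on the order of:

-- Illustrative sketch only; the plugin's real implementation is not shown.
-- ~4 characters per token is a widely used rough estimate.
local function estimate_tokens(text)
  return math.ceil(#text / 4)
end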
@@ -105,8 +88,11 @@
 end
 function M.run_chatgpt_paste_command()
-  print("Reading ChatGPT YAML response from clipboard...")
   local conf = config.load()
+  if conf.debug then
+    vim.api.nvim_out_write("[chatgpt_nvim:init] Running :ChatGPTPaste command.\n")
+  end
+  print("Reading ChatGPT YAML response from clipboard...")
   local raw = handler.get_clipboard_content()
   if raw == "" then
     vim.api.nvim_err_writeln("Clipboard is empty. Please copy the YAML response from ChatGPT first.")
@@ -118,12 +104,10 @@ function M.run_chatgpt_paste_command()
     return
   end
-  -- Check if this is the final answer (with modifications) or just a request for more files.
   if data.project_name and data.files then
-    -- The O1 model provided a YAML with files. We must check whether these files carry content to apply.
-    -- If 'delete: true' or 'content' is present, this is the final set of changes.
-    -- If only paths are listed, without 'content' or 'delete', the model is asking for files.
+    if conf.debug then
+      vim.api.nvim_out_write("[chatgpt_nvim:init] Received project_name and files from response.\n")
+    end
     local is_final = false
     for _, fileinfo in ipairs(data.files) do
       if fileinfo.content or fileinfo.delete == true then
@@ -133,7 +117,6 @@ function M.run_chatgpt_paste_command()
     end
     if is_final then
-      -- Final changes: apply them
       if data.project_name ~= conf.project_name then
         vim.api.nvim_err_writeln("Project name mismatch. The provided changes are for project '" ..
           (data.project_name or "unknown") .. "' but current project is '" ..
@@ -149,16 +132,21 @@ function M.run_chatgpt_paste_command()
          goto continue
        end
-       -- Ensure the path is within the project root
        if not is_subpath(root, fileinfo.path) then
          vim.api.nvim_err_writeln("Invalid file path outside project root: " .. fileinfo.path)
          goto continue
        end
        if fileinfo.delete == true then
+         if conf.debug then
+           vim.api.nvim_out_write("[chatgpt_nvim:init] Deleting file: " .. fileinfo.path .. "\n")
+         end
          handler.delete_file(fileinfo.path)
          print("Deleted file: " .. fileinfo.path)
        elseif fileinfo.content then
+         if conf.debug then
+           vim.api.nvim_out_write("[chatgpt_nvim:init] Writing file: " .. fileinfo.path .. "\n")
+         end
          handler.write_file(fileinfo.path, fileinfo.content)
          print("Wrote file: " .. fileinfo.path)
        else
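The `is_subpath` guard used here appears only partially in this diff (its return line is context in the first hunks above). A plausible full shape, assuming the paths are absolutized with `vim.fn.fnamemodify`:

-- Sketch reconstructed around the one line shown in the diff:
--   return target_abs:sub(1, #root_abs) == root_abs
local function is_subpath(root, path)
  local root_abs = vim.fn.fnamemodify(root, ":p")    -- absolute root; ":p" adds a trailing slash for existing dirs
  local target_abs = vim.fn.fnamemodify(path, ":p")  -- absolute candidate path
  return target_abs:sub(1, #root_abs) == root_abs
end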
@@ -201,7 +189,6 @@ function M.run_chatgpt_paste_command()
    end
  end
-  -- Create a new prompt including the requested files plus a reminder that the model can ask for more.
   local sections = {
     conf.initial_prompt,
     "\n\nProject name: " .. (conf.project_name or ""),
@@ -215,6 +202,10 @@ function M.run_chatgpt_paste_command()
   local token_limit = conf.token_limit or 8000
   local token_count = estimate_tokens(prompt)
+  if conf.debug then
+    vim.api.nvim_out_write("[chatgpt_nvim:init] Returning requested files. Token count: " .. token_count .. "\n")
+  end
   if token_count > token_limit then
     vim.api.nvim_err_writeln("Too many requested files. Exceeds token limit.")
     return