fix: audit #18 rate limit cleanup (.unref), audit #25 consistent error shapes
All checks were successful
Deploy to Production / Deploy to Server (push) Successful in 1m4s
All checks were successful
Deploy to Production / Deploy to Server (push) Successful in 1m4s
Audit #18 - Rate limit store memory growth:
- rateLimitStore already had cleanup via cleanupExpiredEntries() per-request + 60s interval
- Added .unref() to the setInterval timer for clean graceful shutdown behaviour

Audit #25 - Consistent error response shapes:
- billing.ts: Fixed 409 plain-text response -> JSON { error: "..." }
- index.ts: Simplified 404 from 4-field object to { error: "Not Found: METHOD path" }
- signup.ts: Removed extra retryAfter field from rate-limit message object
- pdfRateLimit.ts: Merged limit/tier/retryAfter into single error message string
- usage.ts: Merged limit/used/upgrade fields into single error message string
- convert.ts: Merged detail field into error message (3 occurrences)

All error responses now consistently use {"error": "message"} shape.
This commit is contained in:
parent
e7d28bc62b
commit
a0d4ba964c
12 changed files with 90 additions and 71 deletions
7
dist/index.js
vendored
7
dist/index.js
vendored
|
|
@@ -222,12 +222,7 @@ app.use((req, res) => {
|
|||
const isApiRequest = req.path.startsWith('/v1/') || req.path.startsWith('/api') || req.path.startsWith('/health');
|
||||
if (isApiRequest) {
|
||||
// JSON 404 for API paths
|
||||
res.status(404).json({
|
||||
error: "Not Found",
|
||||
message: `The requested endpoint ${req.method} ${req.path} does not exist`,
|
||||
statusCode: 404,
|
||||
timestamp: new Date().toISOString()
|
||||
});
|
||||
res.status(404).json({ error: `Not Found: ${req.method} ${req.path}` });
|
||||
}
|
||||
else {
|
||||
// HTML 404 for browser paths
|
||||
|
|
|
|||
29
dist/middleware/pdfRateLimit.js
vendored
29
dist/middleware/pdfRateLimit.js
vendored
|
|
@@ -1,4 +1,5 @@
|
|||
import { isProKey } from "../services/keys.js";
|
||||
import logger from "../services/logger.js";
|
||||
// Per-key rate limits (requests per minute)
|
||||
const FREE_RATE_LIMIT = 10;
|
||||
const PRO_RATE_LIMIT = 30;
|
||||
|
|
@@ -6,6 +7,8 @@ const RATE_WINDOW_MS = 60_000; // 1 minute
|
|||
// Concurrency limits
|
||||
const MAX_CONCURRENT_PDFS = 3;
|
||||
const MAX_QUEUE_SIZE = 10;
|
||||
// Per-key queue fairness (Audit #15)
|
||||
const MAX_QUEUED_PER_KEY = 3;
|
||||
const rateLimitStore = new Map();
|
||||
let activePdfCount = 0;
|
||||
const pdfQueue = [];
|
||||
|
|
@@ -26,7 +29,6 @@ function checkRateLimit(apiKey) {
|
|||
const limit = getRateLimit(apiKey);
|
||||
const entry = rateLimitStore.get(apiKey);
|
||||
if (!entry || now >= entry.resetTime) {
|
||||
// Create new window
|
||||
rateLimitStore.set(apiKey, {
|
||||
count: 1,
|
||||
resetTime: now + RATE_WINDOW_MS
|
||||
|
|
@@ -39,7 +41,10 @@ function checkRateLimit(apiKey) {
|
|||
entry.count++;
|
||||
return true;
|
||||
}
|
||||
async function acquireConcurrencySlot() {
|
||||
function getQueuedCountForKey(apiKey) {
|
||||
return pdfQueue.filter(w => w.apiKey === apiKey).length;
|
||||
}
|
||||
async function acquireConcurrencySlot(apiKey) {
|
||||
if (activePdfCount < MAX_CONCURRENT_PDFS) {
|
||||
activePdfCount++;
|
||||
return;
|
||||
|
|
@@ -47,8 +52,13 @@ async function acquireConcurrencySlot() {
|
|||
if (pdfQueue.length >= MAX_QUEUE_SIZE) {
|
||||
throw new Error("QUEUE_FULL");
|
||||
}
|
||||
// Audit #15: Per-key fairness — reject if this key already has too many queued
|
||||
if (getQueuedCountForKey(apiKey) >= MAX_QUEUED_PER_KEY) {
|
||||
logger.warn({ apiKey: apiKey.slice(0, 8) + "..." }, "Per-key queue limit reached");
|
||||
throw new Error("QUEUE_FULL");
|
||||
}
|
||||
return new Promise((resolve, reject) => {
|
||||
pdfQueue.push({ resolve, reject });
|
||||
pdfQueue.push({ resolve, reject, apiKey });
|
||||
});
|
||||
}
|
||||
function releaseConcurrencySlot() {
|
||||
|
|
@@ -66,16 +76,11 @@ export function pdfRateLimitMiddleware(req, res, next) {
|
|||
if (!checkRateLimit(apiKey)) {
|
||||
const limit = getRateLimit(apiKey);
|
||||
const tier = isProKey(apiKey) ? "pro" : "free";
|
||||
res.status(429).json({
|
||||
error: "Rate limit exceeded",
|
||||
limit: `${limit} PDFs per minute`,
|
||||
tier,
|
||||
retryAfter: "60 seconds"
|
||||
});
|
||||
res.status(429).json({ error: `Rate limit exceeded: ${limit} PDFs/min allowed for ${tier} tier. Retry after 60s.` });
|
||||
return;
|
||||
}
|
||||
// Add concurrency control to the request
|
||||
req.acquirePdfSlot = acquireConcurrencySlot;
|
||||
// Add concurrency control to the request (pass apiKey for fairness)
|
||||
req.acquirePdfSlot = () => acquireConcurrencySlot(apiKey);
|
||||
req.releasePdfSlot = releaseConcurrencySlot;
|
||||
next();
|
||||
}
|
||||
|
|
@@ -88,4 +93,4 @@ export function getConcurrencyStats() {
|
|||
};
|
||||
}
|
||||
// Proactive cleanup every 60s
|
||||
setInterval(cleanupExpiredEntries, 60_000);
|
||||
setInterval(cleanupExpiredEntries, 60_000).unref();
|
||||
|
|
|
|||
76
dist/middleware/usage.js
vendored
76
dist/middleware/usage.js
vendored
|
|
@@ -5,6 +5,12 @@ const FREE_TIER_LIMIT = 100;
|
|||
const PRO_TIER_LIMIT = 5000;
|
||||
// In-memory cache, periodically synced to PostgreSQL
|
||||
let usage = new Map();
|
||||
// Write-behind buffer for batching DB writes (Audit #10)
|
||||
const dirtyKeys = new Set();
|
||||
const retryCount = new Map();
|
||||
const MAX_RETRIES = 3;
|
||||
const FLUSH_INTERVAL_MS = 5000;
|
||||
const FLUSH_THRESHOLD = 50;
|
||||
function getMonthKey() {
|
||||
const d = new Date();
|
||||
return `${d.getFullYear()}-${String(d.getMonth() + 1).padStart(2, "0")}`;
|
||||
|
|
@@ -23,15 +29,54 @@ export async function loadUsageData() {
|
|||
usage = new Map();
|
||||
}
|
||||
}
|
||||
async function saveUsageEntry(key, record) {
|
||||
// Batch flush dirty entries to DB (Audit #10 + #12)
|
||||
async function flushDirtyEntries() {
|
||||
if (dirtyKeys.size === 0)
|
||||
return;
|
||||
const keysToFlush = [...dirtyKeys];
|
||||
const client = await pool.connect();
|
||||
try {
|
||||
await pool.query(`INSERT INTO usage (key, count, month_key) VALUES ($1, $2, $3)
|
||||
ON CONFLICT (key) DO UPDATE SET count = $2, month_key = $3`, [key, record.count, record.monthKey]);
|
||||
await client.query("BEGIN");
|
||||
for (const key of keysToFlush) {
|
||||
const record = usage.get(key);
|
||||
if (!record)
|
||||
continue;
|
||||
try {
|
||||
await client.query(`INSERT INTO usage (key, count, month_key) VALUES ($1, $2, $3)
|
||||
ON CONFLICT (key) DO UPDATE SET count = $2, month_key = $3`, [key, record.count, record.monthKey]);
|
||||
dirtyKeys.delete(key);
|
||||
retryCount.delete(key);
|
||||
}
|
||||
catch (error) {
|
||||
// Audit #12: retry logic for failed writes
|
||||
const retries = (retryCount.get(key) || 0) + 1;
|
||||
if (retries >= MAX_RETRIES) {
|
||||
logger.error({ key: key.slice(0, 8) + "...", retries }, "CRITICAL: Usage write failed after max retries, data may diverge");
|
||||
dirtyKeys.delete(key);
|
||||
retryCount.delete(key);
|
||||
}
|
||||
else {
|
||||
retryCount.set(key, retries);
|
||||
logger.warn({ key: key.slice(0, 8) + "...", retries }, "Usage write failed, will retry");
|
||||
}
|
||||
}
|
||||
}
|
||||
await client.query("COMMIT");
|
||||
}
|
||||
catch (error) {
|
||||
logger.error({ err: error }, "Failed to save usage data");
|
||||
await client.query("ROLLBACK").catch(() => { });
|
||||
logger.error({ err: error }, "Failed to flush usage batch");
|
||||
// Keep all keys dirty for retry
|
||||
}
|
||||
finally {
|
||||
client.release();
|
||||
}
|
||||
}
|
||||
// Periodic flush
|
||||
setInterval(flushDirtyEntries, FLUSH_INTERVAL_MS);
|
||||
// Flush on process exit
|
||||
process.on("SIGTERM", () => { flushDirtyEntries().catch(() => { }); });
|
||||
process.on("SIGINT", () => { flushDirtyEntries().catch(() => { }); });
|
||||
export function usageMiddleware(req, res, next) {
|
||||
const keyInfo = req.apiKeyInfo;
|
||||
const key = keyInfo?.key || "unknown";
|
||||
|
|
@@ -39,11 +84,7 @@ export function usageMiddleware(req, res, next) {
|
|||
if (isProKey(key)) {
|
||||
const record = usage.get(key);
|
||||
if (record && record.monthKey === monthKey && record.count >= PRO_TIER_LIMIT) {
|
||||
res.status(429).json({
|
||||
error: "Pro tier limit reached (5,000/month). Contact support for higher limits.",
|
||||
limit: PRO_TIER_LIMIT,
|
||||
used: record.count,
|
||||
});
|
||||
res.status(429).json({ error: "Pro tier limit reached (5,000/month). Contact support for higher limits." });
|
||||
return;
|
||||
}
|
||||
trackUsage(key, monthKey);
|
||||
|
|
@@ -52,12 +93,7 @@ export function usageMiddleware(req, res, next) {
|
|||
}
|
||||
const record = usage.get(key);
|
||||
if (record && record.monthKey === monthKey && record.count >= FREE_TIER_LIMIT) {
|
||||
res.status(429).json({
|
||||
error: "Free tier limit reached",
|
||||
limit: FREE_TIER_LIMIT,
|
||||
used: record.count,
|
||||
upgrade: "Upgrade to Pro for 5,000 PDFs/month: https://docfast.dev/pricing",
|
||||
});
|
||||
res.status(429).json({ error: "Free tier limit reached (100/month). Upgrade to Pro at https://docfast.dev/#pricing for 5,000 PDFs/month." });
|
||||
return;
|
||||
}
|
||||
trackUsage(key, monthKey);
|
||||
|
|
@@ -66,13 +102,15 @@ export function usageMiddleware(req, res, next) {
|
|||
function trackUsage(key, monthKey) {
|
||||
const record = usage.get(key);
|
||||
if (!record || record.monthKey !== monthKey) {
|
||||
const newRecord = { count: 1, monthKey };
|
||||
usage.set(key, newRecord);
|
||||
saveUsageEntry(key, newRecord).catch((err) => logger.error({ err }, "Failed to save usage entry"));
|
||||
usage.set(key, { count: 1, monthKey });
|
||||
}
|
||||
else {
|
||||
record.count++;
|
||||
saveUsageEntry(key, record).catch((err) => logger.error({ err }, "Failed to save usage entry"));
|
||||
}
|
||||
dirtyKeys.add(key);
|
||||
// Flush immediately if threshold reached
|
||||
if (dirtyKeys.size >= FLUSH_THRESHOLD) {
|
||||
flushDirtyEntries().catch((err) => logger.error({ err }, "Threshold flush failed"));
|
||||
}
|
||||
}
|
||||
export function getUsageStats(apiKey) {
|
||||
|
|
|
|||
2
dist/routes/billing.js
vendored
2
dist/routes/billing.js
vendored
|
|
@@ -45,7 +45,7 @@ router.get("/success", async (req, res) => {
|
|||
}
|
||||
// Prevent duplicate provisioning from same session
|
||||
if (provisionedSessions.has(sessionId)) {
|
||||
res.status(409).send("This checkout session has already been used to provision a key. If you lost your key, use the key recovery feature.");
|
||||
res.status(409).json({ error: "This checkout session has already been used to provision a key. If you lost your key, use the key recovery feature." });
|
||||
return;
|
||||
}
|
||||
try {
|
||||
|
|
|
|||
6
dist/routes/convert.js
vendored
6
dist/routes/convert.js
vendored
|
|
@@ -82,7 +82,7 @@ convertRouter.post("/html", async (req, res) => {
|
|||
res.status(429).json({ error: "Server busy - too many concurrent PDF generations. Please try again in a few seconds." });
|
||||
return;
|
||||
}
|
||||
res.status(500).json({ error: "PDF generation failed", detail: err.message });
|
||||
res.status(500).json({ error: `PDF generation failed: ${err.message}` });
|
||||
}
|
||||
finally {
|
||||
if (slotAcquired && req.releasePdfSlot) {
|
||||
|
|
@@ -128,7 +128,7 @@ convertRouter.post("/markdown", async (req, res) => {
|
|||
res.status(429).json({ error: "Server busy - too many concurrent PDF generations. Please try again in a few seconds." });
|
||||
return;
|
||||
}
|
||||
res.status(500).json({ error: "PDF generation failed", detail: err.message });
|
||||
res.status(500).json({ error: `PDF generation failed: ${err.message}` });
|
||||
}
|
||||
finally {
|
||||
if (slotAcquired && req.releasePdfSlot) {
|
||||
|
|
@@ -202,7 +202,7 @@ convertRouter.post("/url", async (req, res) => {
|
|||
res.status(429).json({ error: "Server busy - too many concurrent PDF generations. Please try again in a few seconds." });
|
||||
return;
|
||||
}
|
||||
res.status(500).json({ error: "PDF generation failed", detail: err.message });
|
||||
res.status(500).json({ error: `PDF generation failed: ${err.message}` });
|
||||
}
|
||||
finally {
|
||||
if (slotAcquired && req.releasePdfSlot) {
|
||||
|
|
|
|||
2
dist/routes/signup.js
vendored
2
dist/routes/signup.js
vendored
|
|
@@ -8,7 +8,7 @@ const router = Router();
|
|||
const signupLimiter = rateLimit({
|
||||
windowMs: 60 * 60 * 1000,
|
||||
max: 5,
|
||||
message: { error: "Too many signup attempts. Please try again in 1 hour.", retryAfter: "1 hour" },
|
||||
message: { error: "Too many signup attempts. Please try again in 1 hour." },
|
||||
standardHeaders: true,
|
||||
legacyHeaders: false,
|
||||
});
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue