All checks were successful
Deploy to Production / Deploy to Server (push) Successful in 1m4s
Audit #18 - Rate limit store memory growth: - rateLimitStore already had cleanup via cleanupExpiredEntries() per-request + 60s interval - Added .unref() to the setInterval timer for clean graceful shutdown behaviour Audit #25 - Consistent error response shapes: - billing.ts: Fixed 409 plain-text response -> JSON { error: "..." } - index.ts: Simplified 404 from 4-field object to { error: "Not Found: METHOD path" } - signup.ts: Removed extra retryAfter field from rate-limit message object - pdfRateLimit.ts: Merged limit/tier/retryAfter into single error message string - usage.ts: Merged limit/used/upgrade fields into single error message string - convert.ts: Merged detail field into error message (3 occurrences) All error responses now consistently use {"error": "message"} shape.
96 lines
3 KiB
JavaScript
96 lines
3 KiB
JavaScript
import { isProKey } from "../services/keys.js";
|
|
import logger from "../services/logger.js";
|
|
// Per-key rate limits (requests per minute)
const FREE_RATE_LIMIT = 10;
const PRO_RATE_LIMIT = 30;
const RATE_WINDOW_MS = 60_000; // 1 minute

// Concurrency limits: at most MAX_CONCURRENT_PDFS renders in flight,
// with up to MAX_QUEUE_SIZE callers parked waiting for a slot.
const MAX_CONCURRENT_PDFS = 3;
const MAX_QUEUE_SIZE = 10;

// Per-key queue fairness (Audit #15): one key may hold at most this many
// queue positions, so a single client cannot monopolize the wait queue.
const MAX_QUEUED_PER_KEY = 3;

// apiKey -> { count, resetTime } for the current rate window.
// Expired entries are pruned per-request and by a 60s interval (see bottom).
const rateLimitStore = new Map();

// Number of PDF renders currently in flight.
let activePdfCount = 0;

// FIFO of { resolve, reject, apiKey } waiters parked until a slot frees up.
const pdfQueue = [];
|
|
/**
 * Drop every rate-limit record whose window has already elapsed.
 * Keeps rateLimitStore from accumulating entries for idle keys.
 */
function cleanupExpiredEntries() {
  const now = Date.now();
  // Deleting during Map iteration is safe per the Map spec.
  rateLimitStore.forEach((entry, key) => {
    if (entry.resetTime <= now) {
      rateLimitStore.delete(key);
    }
  });
}
|
|
/**
 * Resolve the per-minute PDF limit for a key based on its tier.
 * @param {string} apiKey
 * @returns {number} requests allowed per rate window
 */
function getRateLimit(apiKey) {
  if (isProKey(apiKey)) {
    return PRO_RATE_LIMIT;
  }
  return FREE_RATE_LIMIT;
}
|
|
/**
 * Record one request for `apiKey` against its current rate window.
 * Opportunistically prunes expired entries first.
 *
 * @param {string} apiKey
 * @returns {boolean} true if the request is within the limit, false if the
 *   key has exhausted its allowance for the current window.
 */
function checkRateLimit(apiKey) {
  cleanupExpiredEntries();

  const now = Date.now();
  const limit = getRateLimit(apiKey);
  const entry = rateLimitStore.get(apiKey);

  // No record, or the previous window has elapsed: start a fresh window.
  if (entry === undefined || entry.resetTime <= now) {
    rateLimitStore.set(apiKey, { count: 1, resetTime: now + RATE_WINDOW_MS });
    return true;
  }

  if (entry.count >= limit) {
    return false; // allowance exhausted for this window
  }

  entry.count += 1;
  return true;
}
|
|
/**
 * Count how many queue positions `apiKey` currently occupies.
 * @param {string} apiKey
 * @returns {number}
 */
function getQueuedCountForKey(apiKey) {
  let queued = 0;
  for (const waiter of pdfQueue) {
    if (waiter.apiKey === apiKey) {
      queued += 1;
    }
  }
  return queued;
}
|
|
/**
 * Acquire a PDF render slot, parking the caller in the FIFO queue when all
 * slots are busy. Resolves once a slot is granted.
 *
 * @param {string} apiKey - used for per-key queue fairness (Audit #15)
 * @throws {Error} "QUEUE_FULL" when the global queue is full, or when this
 *   key already holds MAX_QUEUED_PER_KEY queue positions.
 */
async function acquireConcurrencySlot(apiKey) {
  // Fast path: a slot is free right now.
  if (activePdfCount < MAX_CONCURRENT_PDFS) {
    activePdfCount++;
    return;
  }

  // Global backpressure: refuse rather than queue unboundedly.
  if (pdfQueue.length >= MAX_QUEUE_SIZE) {
    throw new Error("QUEUE_FULL");
  }

  // Audit #15: Per-key fairness — reject if this key already has too many queued
  if (getQueuedCountForKey(apiKey) >= MAX_QUEUED_PER_KEY) {
    logger.warn({ apiKey: apiKey.slice(0, 8) + "..." }, "Per-key queue limit reached");
    throw new Error("QUEUE_FULL");
  }

  // Park until releaseConcurrencySlot() hands us the slot.
  return new Promise((resolve, reject) => {
    pdfQueue.push({ resolve, reject, apiKey });
  });
}
|
|
/**
 * Release a PDF render slot and hand it to the next queued waiter, if any.
 *
 * Fix: guard the decrement so an unbalanced (double) release cannot drive
 * activePdfCount negative — a negative count would permanently inflate the
 * apparent capacity and let more than MAX_CONCURRENT_PDFS renders run.
 */
function releaseConcurrencySlot() {
  if (activePdfCount > 0) {
    activePdfCount--;
  }
  const waiter = pdfQueue.shift();
  if (waiter) {
    // Transfer the freed slot directly to the oldest waiter.
    activePdfCount++;
    waiter.resolve();
  }
}
|
|
/**
 * Express middleware enforcing per-key PDF rate limits and wiring
 * concurrency-slot helpers onto the request.
 *
 * On 429, responds with the standard { error: "message" } shape (Audit #25).
 * Otherwise attaches req.acquirePdfSlot / req.releasePdfSlot and continues.
 */
export function pdfRateLimitMiddleware(req, res, next) {
  // Fall back to a shared "unknown" bucket when no key info is present.
  const apiKey = req.apiKeyInfo?.key || "unknown";

  // Check rate limit first
  if (!checkRateLimit(apiKey)) {
    const limit = getRateLimit(apiKey);
    const tier = isProKey(apiKey) ? "pro" : "free";
    res.status(429).json({ error: `Rate limit exceeded: ${limit} PDFs/min allowed for ${tier} tier. Retry after 60s.` });
    return;
  }

  // Add concurrency control to the request (pass apiKey for fairness)
  req.acquirePdfSlot = () => acquireConcurrencySlot(apiKey);
  req.releasePdfSlot = releaseConcurrencySlot;
  next();
}
|
|
/**
 * Snapshot of the concurrency limiter for monitoring/health endpoints.
 * @returns {{activePdfCount: number, queueSize: number, maxConcurrent: number, maxQueue: number}}
 */
export function getConcurrencyStats() {
  const snapshot = {
    activePdfCount,
    queueSize: pdfQueue.length,
    maxConcurrent: MAX_CONCURRENT_PDFS,
    maxQueue: MAX_QUEUE_SIZE,
  };
  return snapshot;
}
|
|
// Proactive cleanup every 60s, in addition to the per-request sweep in
// checkRateLimit(). .unref() lets the process exit without this timer
// keeping the event loop alive (Audit #18 — clean graceful shutdown).
setInterval(cleanupExpiredEntries, 60_000).unref();
|