From 10fb8e88ce3ddf1190bb37fdc9150e8af757b8cc Mon Sep 17 00:00:00 2001 From: Dominik Polakovics Date: Wed, 10 Dec 2025 13:36:33 +0100 Subject: [PATCH] fix: change click and load package name --- .../pkgs/clicknload-proxy/clicknload-proxy.py | 96 ++++++++++++++++++- 1 file changed, 94 insertions(+), 2 deletions(-) diff --git a/utils/pkgs/clicknload-proxy/clicknload-proxy.py b/utils/pkgs/clicknload-proxy/clicknload-proxy.py index 7a1ae86..389c322 100644 --- a/utils/pkgs/clicknload-proxy/clicknload-proxy.py +++ b/utils/pkgs/clicknload-proxy/clicknload-proxy.py @@ -9,6 +9,7 @@ Implements the Click'n'Load protocol: import argparse import base64 +import html import json import re import sys @@ -24,6 +25,87 @@ def log(msg): print(f"[CNL] {msg}", flush=True) +def fetch_package_name(url): + """Fetch package name from source page by extracting
<title>
tag (like JDownloader).""" + if not url or not url.startswith("http"): + return None + + try: + log(f"Fetching package name from {url}") + req = urllib.request.Request( + url, + headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"} + ) + with urllib.request.urlopen(req, timeout=10) as resp: + content = resp.read().decode("utf-8", errors="ignore") + + # Extract
<title>
content like JDownloader does + match = re.search(r'<title[^>]*>([^<]+)<', content) + if match: + name = html.unescape(match.group(1)).strip() + log(f"Extracted package name: {name}") + return name + + log("No
<title>
tag found on page") + return None + except Exception as e: + log(f"Failed to fetch package name: {e}") + return None + + +def extract_package_name_from_links(links): + """Extract package name from common prefix of link filenames.""" + if not links: + return None + + # Extract filenames from URLs + filenames = [] + for link in links: + parsed = urllib.parse.urlparse(link) + path = urllib.parse.unquote(parsed.path) + filename = path.split("/")[-1] if path else "" + # Remove extension + if "." in filename: + filename = filename.rsplit(".", 1)[0] + if filename: + filenames.append(filename) + + if not filenames: + return None + + if len(filenames) == 1: + # Single file - use its name + name = filenames[0] + log(f"Single file, using name: {name}") + return name + + # Find common prefix among all filenames + prefix = filenames[0] + for filename in filenames[1:]: + while prefix and not filename.startswith(prefix): + # Remove last character or segment + if "." in prefix: + prefix = prefix.rsplit(".", 1)[0] + elif "-" in prefix: + prefix = prefix.rsplit("-", 1)[0] + elif "_" in prefix: + prefix = prefix.rsplit("_", 1)[0] + else: + prefix = prefix[:-1] + + # Clean up trailing separators + prefix = prefix.rstrip(".-_ ") + + if prefix and len(prefix) >= 3: + log(f"Common prefix from {len(filenames)} files: {prefix}") + return prefix + + # Fallback: use first filename + name = filenames[0] + log(f"No common prefix, using first filename: {name}") + return name + + class ClickNLoadHandler(BaseHTTPRequestHandler): pyload_url = None pyload_user = None @@ -66,12 +148,22 @@ class ClickNLoadHandler(BaseHTTPRequestHandler): crypted = params.get("crypted", [""])[0] source = params.get("source", ["Click'n'Load"])[0] - log(f"Received addcrypted2: source={source}, jk_len={len(jk)}, crypted_len={len(crypted)}") + # Get actual page URL from Referer header + referer = self.headers.get("Referer", "") + + log(f"Received addcrypted2: source={source}, referer={referer}") + log(f" jk_len={len(jk)}, 
crypted_len={len(crypted)}") try: links = self.decrypt_links(jk, crypted) if links: - self.add_to_pyload(links, source) + # Try to get package name: referer page -> link filenames -> source + package_name = ( + fetch_package_name(referer) or + extract_package_name_from_links(links) or + source + ) + self.add_to_pyload(links, package_name) self.send_response(200) self.send_cors_headers() self.end_headers()