-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathextract.py
72 lines (63 loc) · 1.98 KB
/
extract.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
import argparse
import re
import sys
from urllib.parse import urlparse, parse_qs

import requests
from bs4 import BeautifulSoup
# OAuth query-parameter names, grouped exactly as the two regexes group them.
# Kept in ONE place so the regexes and the parsed-query membership check can
# never drift apart (previously the names were hand-duplicated in three spots).
_OAUTH_PARAM_GROUPS = (
    ("client_id", "redirect_uri", "response_type", "scope", "state"),
    ("access_token", "authorization_code", "token_type", "code_challenge"),
)
# Flat set for O(1) membership tests against parsed query keys.
_OAUTH_PARAM_SET = frozenset(
    name for group in _OAUTH_PARAM_GROUPS for name in group
)

# Compiled regex patterns for OAuth URL parameters, built from the same
# parameter groups as the membership check above.  Each requires an http(s)
# URL carrying one of the names as `?name=value` / `&name=value` with a
# non-empty value.
oauth_patterns = [
    re.compile(
        r"(https?://[^\s]+?)(\?|&)(%s)=[^\s&]+" % "|".join(group),
        re.IGNORECASE,
    )
    for group in _OAUTH_PARAM_GROUPS
]


def extract_oauth_urls(url):
    """Return *url* if it contains OAuth parameters, else ``None``.

    A URL qualifies when (a) one of the compiled regexes matches and
    (b) the parsed query string actually contains a known OAuth parameter
    name (the regex is case-insensitive but the key check is exact, matching
    the original behavior).

    Side effect: prints a notice for each qualifying URL.
    """
    # Gate on the cheap regex scan first; parse the query string only once
    # (previously the URL was re-parsed inside the loop for every pattern).
    if any(pattern.search(url) for pattern in oauth_patterns):
        params = parse_qs(urlparse(url).query)
        # Set intersection replaces the hand-written name list.
        if _OAUTH_PARAM_SET.intersection(params):
            print(f"Potential OAuth URL found: {url}")
            return url
    return None
def main():
    """Read URLs from a file (or stdin via ``-``) and report OAuth-looking ones.

    Command line:
        -f / --file   (required) path to a file of URLs, one per line,
                      or ``-`` to read from stdin.
    """
    parser = argparse.ArgumentParser(
        description="Extract OAuth URLs from a domain or file."
    )
    parser.add_argument(
        "-f",
        "--file",
        type=argparse.FileType("r"),
        help="File with URLs to scan or '-' for stdin.",
        required=True,
    )
    args = parser.parse_args()
    try:
        # Drop blank lines and trailing whitespace/newlines.
        urls = [line.strip() for line in args.file if line.strip()]
    except OSError as e:
        # Only I/O failures while reading belong here.  The old broad
        # `except Exception` also wrapped the processing loop, so any bug in
        # extract_oauth_urls was swallowed and mislabeled a "file" error.
        print("Error reading file:", e)
        return
    finally:
        # argparse.FileType opens the file but never closes it; close it
        # ourselves — except stdin, which we must leave open for '-'.
        if args.file is not sys.stdin:
            args.file.close()
    print("Loaded URLs:", urls)  # Debugging output
    for url in urls:
        # Debugging output to verify each URL being processed
        print(f"Processing URL: {url}")
        extract_oauth_urls(url)


if __name__ == "__main__":
    main()