url-scrapper.py
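# Usage sketch (illustrative; the flags below correspond to the argparse options defined in main()):
"""Multi-threaded crawler that collects the external domains linked from a
starting site and writes them, one per line, to an output file.

Example (illustrative invocation; example.com and ignored.txt are placeholders):
    python url-scrapper.py example.com --depth 2 --ignore-file ignored.txt -v
    # external domains are written to external_domains.txt by default
"""
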
import sys
import argparse
import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
import re
from concurrent.futures import ThreadPoolExecutor, as_completed
import threading
import os
# Global sets protected by locks for thread-safety
visited_lock = threading.Lock()
visited_urls = set()
external_lock = threading.Lock()
external_domains = set()


def strip_www(domain):
    # Note: str.lstrip("www.") strips *characters*, not the prefix, and would
    # mangle domains such as "w3.org"; remove the prefix explicitly instead.
    domain = domain.lower()
    return domain[len("www."):] if domain.startswith("www.") else domain


def normalize_domain(input_domain):
    if not input_domain.startswith("http://") and not input_domain.startswith("https://"):
        input_domain = "http://" + input_domain
    parsed = urlparse(input_domain)
    base_domain = strip_www(parsed.netloc)
    base_url = f"{parsed.scheme}://{parsed.netloc}"
    return base_url, base_domain


def extract_links_from_html(html):
    """Extract hrefs from <a> tags and also attempt to find URLs in the page using a regex."""
    soup = BeautifulSoup(html, "html.parser")
    hrefs = []
    # Extract from <a> tags
    for link in soup.find_all("a", href=True):
        href = link['href'].strip()
        if href.startswith("mailto:") or href.startswith("javascript:"):
            continue
        hrefs.append(href)
    # Additionally, search the whole HTML for URLs that might not be in <a> tags
    url_pattern = re.compile(r"https?://[^\s\"'<>]+", re.IGNORECASE)
    all_urls = url_pattern.findall(html)
    for u in all_urls:
        if u not in hrefs:
            hrefs.append(u)
    return hrefs


def categorize_links(hrefs, base_url, base_domain):
    internal_urls = set()
    found_externals = set()
    for href in hrefs:
        absolute_url = urljoin(base_url, href)
        parsed = urlparse(absolute_url)
        if not parsed.netloc:
            continue
        link_domain = strip_www(parsed.netloc)
        if link_domain == base_domain:
            internal_urls.add(absolute_url)
        else:
            found_externals.add(link_domain)
    return internal_urls, found_externals


def process_url(url, depth, max_depth, base_url, base_domain, verbose, ignored_domains):
    # Check if already visited
    with visited_lock:
        if url in visited_urls:
            return []
        visited_urls.add(url)
    if verbose:
        print(f"[INFO] Crawling: {url} (depth: {depth})")
    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/96.0.4664.110 Safari/537.36"
        )
    }
    try:
        r = requests.get(url, headers=headers, timeout=10)
        r.raise_for_status()
        page_html = r.text
    except Exception as e:
        if verbose:
            print(f"[ERROR] Failed to retrieve {url}: {e}")
        return []
    hrefs = extract_links_from_html(page_html)
    internal_urls, externals = categorize_links(hrefs, base_url, base_domain)
    # Filter external domains by ignored domains
    externals = {d for d in externals if d not in ignored_domains}
    newly_found_externals = []
    with external_lock:
        for d in externals:
            if d not in external_domains:
                external_domains.add(d)
                newly_found_externals.append(d)
    if verbose and newly_found_externals:
        print(f"[INFO] Found external domains at {url}:")
        for domain in newly_found_externals:
            print(f" - {domain}")
    next_tasks = []
    if depth < max_depth:
        new_internal_links = []
        with visited_lock:
            for link in internal_urls:
                if link not in visited_urls:
                    new_internal_links.append(link)
        if verbose and new_internal_links:
            print(f"[INFO] Discovered internal links at {url}:")
            for link in new_internal_links:
                print(f" + {link}")
        for link in new_internal_links:
            next_tasks.append((link, depth + 1))
    return next_tasks


def main():
    parser = argparse.ArgumentParser(description="A multi-threaded web crawler that finds external domains.")
    parser.add_argument("domain", help="The starting domain to crawl.")
    parser.add_argument("--depth", type=int, default=1, help="Max depth to crawl. Default=1.")
    parser.add_argument("--output", default="external_domains.txt", help="Output file for external domains.")
    parser.add_argument("--ignore-file", default=None, help="File containing domains to ignore.")
    parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging.")
    args = parser.parse_args()
    base_url, base_domain = normalize_domain(args.domain)
    max_depth = args.depth
    output_file = args.output
    verbose = args.verbose
    # Load ignored domains if provided
    ignored_domains = set()
    if args.ignore_file and os.path.exists(args.ignore_file):
        try:
            with open(args.ignore_file, 'r') as f:
                for line in f:
                    d = line.strip()
                    if d:
                        ignored_domains.add(strip_www(d))
        except Exception as e:
            if verbose:
                print(f"[ERROR] Could not load ignore file {args.ignore_file}: {e}")
    if verbose:
        print(f"[START] Starting crawl at {base_url} up to depth {max_depth}")
        if ignored_domains:
            print("[INFO] Ignored domains loaded:")
            for d in ignored_domains:
                print(f" - {d}")
    max_workers = 5  # Adjust as needed
    tasks = [(base_url, 0)]
    # We'll process tasks in a loop as they generate new tasks
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(process_url, url, depth, max_depth, base_url, base_domain, verbose, ignored_domains)
                   for url, depth in tasks]
        while futures:
            done, futures = as_completed(futures, timeout=None), []
            new_tasks = []
            for future in done:
                result = future.result()
                if result:
                    new_tasks.extend(result)
            for (u, d) in new_tasks:
                futures.append(executor.submit(process_url, u, d, max_depth, base_url, base_domain, verbose, ignored_domains))
    if verbose:
        print("[DONE] Crawling complete.")
    with open(output_file, 'w') as f:
        with external_lock:
            for domain in sorted(external_domains):
                f.write(domain + "\n")
    print(f"Found {len(external_domains)} external domains. Results saved to {output_file}.")


if __name__ == "__main__":
    main()
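
# Example ignore file (illustrative domains): one domain per line; entries are
# compared after lower-casing and stripping any leading "www.", matching how
# external domains are recorded above.
#   googleapis.com
#   fonts.gstatic.com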