"""
Scrape synapse.org and sagebionetworks.org for URLs to generate QR codes.
Discovers new URLs by:
1. Fetching sitemaps from both domains
2. Crawling key pages for subdomain links
3. Merging with existing urls.json, preserving manual entries
"""
import json
import re
import sys
import xml.etree.ElementTree as ET
from pathlib import Path
from urllib.parse import urlparse

import requests

TIMEOUT = 15
HEADERS = {"User-Agent": "SageQRCodeBot/1.0 (internal tooling)"}

# Base domains we care about
SYNAPSE_DOMAIN = "synapse.org"
SAGE_DOMAIN = "sagebionetworks.org"

# Pages to crawl for discovering subdomain links
CRAWL_PAGES = [
    "https://sagebionetworks.org",
    "https://sagebionetworks.org/universe",
    "https://sagebionetworks.org/community",
    "https://www.synapse.org",
]

# URL patterns to skip (noisy/dynamic pages)
SKIP_PATTERNS = [
    r"/wp-content/",
    r"/wp-json/",
    r"/wp-admin/",
    r"/feed/?$",
    r"/tag/",
    r"/category/",
    r"/author/",
    r"/page/\d+",
    r"\?",
    r"#",
    r"/\d{4}/\d{2}/",  # blog date archives
    r"/xmlrpc\.php",
    r"/wp-login",
]

def fetch(url):
    try:
        r = requests.get(url, headers=HEADERS, timeout=TIMEOUT, allow_redirects=True)
        r.raise_for_status()
        return r.text
    except Exception as e:
        print(f" [skip] {url}: {e}", file=sys.stderr)
        return None

def parse_sitemap(xml_text):
    """Extract URLs from a sitemap XML string. Handles sitemap indexes too."""
    urls = set()
    try:
        root = ET.fromstring(xml_text)
    except ET.ParseError:
        return urls
    ns = {"sm": "http://www.sitemaps.org/schemas/sitemap/0.9"}
    # Check for sitemap index
    for sitemap in root.findall(".//sm:sitemap/sm:loc", ns):
        sub_xml = fetch(sitemap.text.strip())
        if sub_xml:
            urls |= parse_sitemap(sub_xml)
    # Regular URL entries
    for loc in root.findall(".//sm:url/sm:loc", ns):
        urls.add(loc.text.strip())
    return urls

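# Illustrative only: a minimal flat sitemap of the kind parse_sitemap() accepts
# (the namespace matches the sitemaps.org 0.9 schema used above; the <loc> URL
# is a placeholder, not a confirmed live page):
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
#     <url><loc>https://sagebionetworks.org/about</loc></url>
#   </urlset>
#
# A <sitemapindex> document with <sitemap><loc>...</loc></sitemap> entries is
# followed recursively via fetch() before its URLs are collected.
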
def discover_sitemap_urls(base_url):
    """Try common sitemap locations for a domain."""
    urls = set()
    sitemap_paths = ["/sitemap.xml", "/sitemap_index.xml", "/wp-sitemap.xml"]
    for path in sitemap_paths:
        xml = fetch(base_url + path)
        if xml:
            found = parse_sitemap(xml)
            print(f" Found {len(found)} URLs in {base_url}{path}", file=sys.stderr)
            urls |= found
    return urls


def extract_links_from_html(html, source_url):
    """Pull all href links from HTML."""
    urls = set()
    for match in re.finditer(r'href=["\']([^"\']+)["\']', html):
        href = match.group(1)
        if href.startswith("http"):
            urls.add(href)
    return urls


def crawl_for_links():
    """Crawl key pages to discover linked subdomains and pages."""
    urls = set()
    for page in CRAWL_PAGES:
        print(f" Crawling {page}...", file=sys.stderr)
        html = fetch(page)
        if html:
            urls |= extract_links_from_html(html, page)
    return urls


def should_skip(url):
    for pat in SKIP_PATTERNS:
        if re.search(pat, url):
            return True
    return False

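# For example, should_skip("https://sagebionetworks.org/2023/05/some-post")
# is True (it matches the blog date-archive pattern), and any URL containing
# "?" or "#" is skipped as well. The URL above is illustrative only.
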
def normalize_url(url):
    """Normalize a URL: strip trailing slash, fragment, query."""
    parsed = urlparse(url)
    path = parsed.path.rstrip("/")
    return f"{parsed.scheme}://{parsed.netloc}{path}"

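# For example (illustrative URL),
# normalize_url("https://sagebionetworks.org/about/?utm=x#team") returns
# "https://sagebionetworks.org/about": the query and fragment are dropped
# because only scheme, netloc, and the slash-stripped path are rebuilt.
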
def categorize_url(url):
    """Return a section id for a URL, or None if not relevant."""
    parsed = urlparse(url)
    host = parsed.hostname or ""
    path = parsed.path.rstrip("/")
    if host.endswith(SYNAPSE_DOMAIN):
        parts = host.replace(SYNAPSE_DOMAIN, "").rstrip(".")
        if parts and parts != "www":  # real subdomain like genie., htan., etc.
            return "syn"
        # www.synapse.org or synapse.org root only, skip app routes
        if not path or path == "/":
            return "syn"
        return None  # skip deep synapse.org paths (app routes)
    if host.endswith(SAGE_DOMAIN):
        parts = host.replace(SAGE_DOMAIN, "").rstrip(".")
        if parts:  # subdomain like accounts., blog., tower.
            return "sage"
        # sagebionetworks.org paths
        if path.startswith("/community/"):
            return "comm"
        if path and path != "/":
            # Only include top-level-ish pages, not deeply nested
            depth = len([p for p in path.split("/") if p])
            if depth <= 2:
                return "pages"
            return None
        return "sage"
    return None

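# Illustrative examples of how categorize_url() buckets URLs (the hosts and
# paths below are examples, not a confirmed inventory of live pages):
#   https://genie.synapse.org                  -> "syn"   (synapse.org subdomain)
#   https://www.synapse.org                    -> "syn"   (root only; app routes return None)
#   https://tower.sagebionetworks.org          -> "sage"  (sagebionetworks.org subdomain)
#   https://sagebionetworks.org/community/xyz  -> "comm"
#   https://sagebionetworks.org/careers        -> "pages" (path depth <= 2)
#   https://sagebionetworks.org/a/b/c          -> None    (too deeply nested)
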
def make_desc(url):
    """Generate a placeholder description from the URL."""
    parsed = urlparse(url)
    host = parsed.hostname or ""
    path = parsed.path.strip("/")
    if path:
        name = path.split("/")[-1].replace("-", " ").replace("_", " ").title()
        return name
    else:
        sub = host.replace(f".{SYNAPSE_DOMAIN}", "").replace(f".{SAGE_DOMAIN}", "")
        if sub and sub != "www":
            return sub.replace("-", " ").replace("_", " ").title()
        return host

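# The urls.json read and written below is assumed (inferred from the field
# accesses in load_existing() and main()) to be a list of section objects
# shaped roughly like this; display labels are illustrative:
#
#   [
#     {
#       "section": "Sage Bionetworks",   # display label; exact wording may differ
#       "id": "sage",                    # one of: "syn", "sage", "pages", "comm"
#       "items": [
#         {"url": "https://sagebionetworks.org", "desc": "Sage Bionetworks"}
#       ]
#     }
#   ]
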
def load_existing():
    p = Path(__file__).parent / "urls.json"
    if p.exists():
        return json.loads(p.read_text())
    return []


def save_urls(data):
    p = Path(__file__).parent / "urls.json"
    p.write_text(json.dumps(data, indent=2) + "\n")

def main():
    print("Loading existing urls.json...", file=sys.stderr)
    existing = load_existing()

    # Build lookup of existing URLs (for deduplication)
    existing_urls = {}
    for section in existing:
        for item in section["items"]:
            existing_urls[normalize_url(item["url"])] = item

    # Discover new URLs
    print("Discovering URLs from sitemaps...", file=sys.stderr)
    sage_sitemap = discover_sitemap_urls(f"https://{SAGE_DOMAIN}")
    synapse_sitemap = discover_sitemap_urls(f"https://www.{SYNAPSE_DOMAIN}")

    print("Crawling pages for links...", file=sys.stderr)
    crawled = crawl_for_links()

    all_discovered = sage_sitemap | synapse_sitemap | crawled
    print(f"Total discovered: {len(all_discovered)} raw URLs", file=sys.stderr)

    # Filter and categorize
    new_count = 0
    additions = {"syn": [], "sage": [], "pages": [], "comm": []}
    for url in sorted(all_discovered):
        if should_skip(url):
            continue
        norm = normalize_url(url)
        if norm in existing_urls:
            continue
        section_id = categorize_url(norm)
        if section_id is None:
            continue
        additions[section_id].append({"url": norm, "desc": make_desc(norm)})
        new_count += 1

    print(f"New URLs to add: {new_count}", file=sys.stderr)

    # Merge into existing structure
    section_map = {s["id"]: s for s in existing}
    for sid, new_items in additions.items():
        if sid in section_map:
            section_map[sid]["items"].extend(new_items)
        elif new_items:
            # Shouldn't happen with current categories, but handle gracefully
            existing.append(
                {
                    "section": sid,
                    "id": sid,
                    "items": new_items,
                }
            )

    # Sort items within each section
    for section in existing:
        section["items"].sort(key=lambda i: i["url"])

    save_urls(existing)

    total = sum(len(s["items"]) for s in existing)
    print(f"Done. {total} total URLs ({new_count} new).", file=sys.stderr)

    if new_count > 0:
        print("NEW_URLS_FOUND=true")
    else:
        print("NEW_URLS_FOUND=false")

if __name__ == "__main__":
    main()
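# Typical invocation (assumed; the calling workflow is not part of this file):
#
#   python scrape_urls.py
#
# Progress messages go to stderr; the final NEW_URLS_FOUND=true/false line goes
# to stdout so a wrapper script or CI step can capture it cleanly.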