-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: scrape_site.py
More file actions
312 lines (248 loc) · 9.05 KB
/
scrape_site.py
File metadata and controls
312 lines (248 loc) · 9.05 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
#!/usr/bin/env python3
"""
Script to scrape skullspace.ca WordPress site and convert to Hugo format.
Downloads all images and converts content to Markdown.
"""
import os
import re
import sys
import time
import urllib.parse
from pathlib import Path
from urllib.parse import urljoin, urlparse
import html2text
import requests
from bs4 import BeautifulSoup
# Configuration
# Root of the WordPress site being scraped.
BASE_URL = "https://skullspace.ca"
# Output layout mirrors a Hugo site rooted next to this script.
OUTPUT_DIR = Path(__file__).parent
CONTENT_DIR = OUTPUT_DIR / "content"
STATIC_DIR = OUTPUT_DIR / "static"
IMAGES_DIR = STATIC_DIR / "img"
# Create directories
CONTENT_DIR.mkdir(exist_ok=True)
STATIC_DIR.mkdir(exist_ok=True)
IMAGES_DIR.mkdir(exist_ok=True, parents=True)
# Session for connection pooling
session = requests.Session()
# Browser-like UA so the server doesn't reject the scraper.
session.headers.update({
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36'
})
# Track downloaded images to avoid duplicates
# downloaded_images maps remote image URL -> local Hugo path;
# downloaded_urls records pages already fetched.
downloaded_images = {}
downloaded_urls = set()
def sanitize_filename(filename):
    """Return *filename* made safe for the local filesystem.

    Characters illegal on common filesystems become dashes, leading and
    trailing dots/spaces are trimmed, and the result is capped at 200
    characters.
    """
    # Replace every character Windows/POSIX filesystems reject.
    cleaned = re.sub(r'[<>:"/\\|?*]', '-', filename)
    # Trim dots and spaces that would yield hidden or invalid names.
    cleaned = cleaned.strip('. ')
    # Stay comfortably under typical 255-byte filename limits.
    return cleaned[:200]
def download_image(img_url, page_slug=""):
    """Download an image and return its Hugo static path.

    Args:
        img_url: Absolute or site-relative image URL.
        page_slug: Optional page slug; when given, the image is stored
            under a per-page subdirectory of static/img/.

    Returns:
        The Hugo path (e.g. "/img/<slug>/<file>") on success, or the
        original URL if the download fails.
    """
    # Serve repeat requests from the in-memory cache.
    if img_url in downloaded_images:
        return downloaded_images[img_url]
    try:
        # Make absolute URL
        if not img_url.startswith('http'):
            img_url = urljoin(BASE_URL, img_url)
        # Parse URL to get filename
        parsed = urlparse(img_url)
        filename = os.path.basename(parsed.path)
        # If no filename, generate one from a hash of the URL.
        if not filename or '.' not in filename:
            ext = 'jpg'  # default
            if 'png' in img_url.lower():
                ext = 'png'
            elif 'gif' in img_url.lower():
                ext = 'gif'
            elif 'svg' in img_url.lower():
                ext = 'svg'
            filename = f"image_{hash(img_url) % 10000}.{ext}"
        filename = sanitize_filename(filename)
        # Create subdirectory based on page if provided.
        if page_slug:
            page_img_dir = IMAGES_DIR / page_slug
            page_img_dir.mkdir(exist_ok=True, parents=True)
            local_path = page_img_dir / filename
            # BUG FIX: the Hugo path must embed the actual filename,
            # not a literal placeholder.
            hugo_path = f"/img/{page_slug}/{filename}"
        else:
            local_path = IMAGES_DIR / filename
            hugo_path = f"/img/{filename}"
        # Skip if already downloaded to disk on a previous run.
        if local_path.exists():
            downloaded_images[img_url] = hugo_path
            return hugo_path
        # Download image (streamed to keep memory flat).
        print(f" Downloading image: {img_url}")
        response = session.get(img_url, timeout=30, stream=True)
        response.raise_for_status()
        # Save image
        with open(local_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
        downloaded_images[img_url] = hugo_path
        time.sleep(0.5)  # Be polite
        return hugo_path
    except Exception as e:
        print(f" Error downloading image {img_url}: {e}")
        return img_url  # Return original URL if download fails
def html_to_markdown(html_content, page_slug=""):
    """Convert an HTML fragment to Markdown, localizing its images.

    Every <img> is downloaded via download_image() and its src rewritten
    to the local Hugo path before the html2text conversion runs.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    # Rewrite each image source to a locally downloaded copy; WordPress
    # lazy-loaders stash the real URL in data-src / data-lazy-src.
    for img in soup.find_all('img'):
        src = img.get('src') or img.get('data-src') or img.get('data-lazy-src')
        if src:
            img['src'] = download_image(src, page_slug)
    # Configure html2text for faithful, unwrapped Markdown output.
    converter = html2text.HTML2Text()
    converter.ignore_links = False
    converter.ignore_images = False
    converter.body_width = 0  # Don't wrap lines
    converter.unicode_snob = True
    markdown = converter.handle(str(soup))
    # Collapse runs of three-plus newlines and trim the edges.
    markdown = re.sub(r'\n{3,}', r'\n\n', markdown)
    return markdown.strip()
def get_page_content(url):
    """Fetch a page's HTML, or None if already fetched or on error.

    Successful fetches are recorded in downloaded_urls so each URL is
    retrieved at most once per run.
    """
    # Never fetch the same URL twice.
    if url in downloaded_urls:
        return None
    try:
        print(f"Fetching: {url}")
        resp = session.get(url, timeout=30)
        resp.raise_for_status()
        downloaded_urls.add(url)
        return resp.text
    except Exception as e:
        # Log and move on; the crawl should survive individual failures.
        print(f"Error fetching {url}: {e}")
        return None
def extract_links(html):
    """Return the set of absolute internal links found in *html*."""
    soup = BeautifulSoup(html, 'html.parser')
    found = set()
    for anchor in soup.find_all('a', href=True):
        href = anchor['href']
        # Keep only site-internal targets: already-absolute URLs on the
        # site, or root-relative paths made absolute here.
        if BASE_URL in href:
            found.add(href)
        elif href.startswith('/'):
            found.add(urljoin(BASE_URL, href))
    return found
def create_hugo_content(title, content, url, content_type="page", date=None):
    """Write one Hugo content file for a scraped page.

    Args:
        title: Page title for the TOML front matter.
        content: Markdown body.
        url: Source URL; its path becomes the slug/filename.
        content_type: "post" places the file under content/posts/.
        date: ISO date string; defaults to "2025-01-01" when absent.

    Returns:
        The slug used as the file's base name.
    """
    # Generate a filesystem-safe slug from the URL path.
    parsed = urlparse(url)
    slug = parsed.path.strip('/').replace('/', '-') or 'index'
    slug = sanitize_filename(slug)
    # Posts live in a dedicated subdirectory.
    if content_type == "post":
        file_path = CONTENT_DIR / "posts" / f"{slug}.md"
        (CONTENT_DIR / "posts").mkdir(exist_ok=True)
    else:
        file_path = CONTENT_DIR / f"{slug}.md"
    # BUG FIX: escape quotes outside the f-string — a backslash inside
    # an f-string expression is a SyntaxError before Python 3.12.
    safe_title = title.replace('"', '\\"')
    front_matter = f"""+++
title = "{safe_title}"
date = "{date or '2025-01-01'}"
draft = false
+++
"""
    # Write front matter followed by the Markdown body.
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write(front_matter)
        f.write(content)
    print(f"Created: {file_path}")
    return slug
def scrape_site():
    """Crawl skullspace.ca breadth-first and emit Hugo content files."""
    print("Starting scrape of skullspace.ca...")
    # Seed the crawl with the homepage plus common WordPress listings.
    to_visit = {
        BASE_URL,
        f"{BASE_URL}/blog/",
        f"{BASE_URL}/posts/",
        f"{BASE_URL}/category/",
        f"{BASE_URL}/tag/",
    }
    visited = set()
    while to_visit:
        url = to_visit.pop()
        if url in visited:
            continue
        visited.add(url)
        # Skip obvious binary assets by extension.
        if any(url.lower().endswith(ext) for ext in ['.jpg', '.jpeg', '.png', '.gif', '.pdf', '.zip']):
            continue
        html = get_page_content(url)
        if not html:
            continue
        soup = BeautifulSoup(html, 'html.parser')
        # Page title, with site branding stripped.
        title_tag = soup.find('title')
        title = title_tag.get_text().strip() if title_tag else "Untitled"
        title = title.replace(' | SkullSpace', '').replace('SkullSpace - ', '').strip()
        # Locate the main content via common WordPress selectors,
        # most specific candidates first.
        content_selectors = [
            'article',
            '.entry-content',
            '.post-content',
            '.content',
            'main',
            '#content',
            '.main-content'
        ]
        content_html = None
        for selector in content_selectors:
            node = soup.select_one(selector)
            if node:
                content_html = str(node)
                break
        if not content_html:
            # Fall back to <body> with page chrome removed.
            body = soup.find('body')
            if body:
                for tag in body.find_all(['nav', 'header', 'footer', 'script', 'style']):
                    tag.decompose()
                content_html = str(body)
        # Convert to Markdown, downloading images along the way.
        slug = urlparse(url).path.strip('/').replace('/', '-') or 'index'
        slug = sanitize_filename(slug)
        content_md = html_to_markdown(content_html, slug)
        # Simple URL-based heuristic: blog-like paths are posts.
        is_post = any(word in url.lower() for word in ['/blog/', '/post/', '/news/', '/article/'])
        content_type = "post" if is_post else "page"
        # Pull a publish date from <time> or date-ish class names.
        date = None
        date_node = soup.find('time') or soup.find(class_=re.compile('date|published'))
        if date_node:
            datetime_attr = date_node.get('datetime') or date_node.get_text()
            if datetime_attr:
                date = datetime_attr[:10]  # YYYY-MM-DD
        create_hugo_content(title, content_md, url, content_type, date)
        # Queue newly discovered internal links.
        for link in extract_links(html):
            if BASE_URL in link and link not in visited:
                to_visit.add(link)
        time.sleep(1)  # Be polite to the server
    print(f"\nScraping complete!")
    print(f"Visited {len(visited)} pages")
    print(f"Downloaded {len(downloaded_images)} images")
if __name__ == "__main__":
    try:
        scrape_site()
    except KeyboardInterrupt:
        # Ctrl-C: exit with a non-zero status but no traceback.
        print("\nScraping interrupted by user")
        sys.exit(1)
    except Exception as e:
        # Top-level boundary: report the failure with a full traceback.
        print(f"Error: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)