scrapeAll.py
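"""Scrape all Medpages listings for the services listed in two CSV files.

For every service code the script walks the paginated listing pages,
extracts each practitioner record, and writes the combined results to
medpages_all_data.json. Services are fetched in parallel with a thread pool.
"""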
import requests
from bs4 import BeautifulSoup
import json
import re
import time
import csv
import os
from urllib.parse import urlparse, parse_qs
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import Lock
BASE = "https://www.medpages.info/sf/"
HEADERS = {"User-Agent": "Mozilla/5.0 (compatible; MedpagesScraper/1.0)"}
# Thread-safe counter and lock for progress tracking
progress_lock = Lock()
progress_counter = 0
def extract_people(html):
"""Parse one Medpages listing page and return list of dicts."""
soup = BeautifulSoup(html, "html.parser")
people = []
# Check if there are no details
if "No Details" in soup.text:
return []
# Find each result record
for section in soup.select("section.result-record"):
person = {}
h2 = section.find("h2")
h3 = section.find("h3")
h4 = section.find("h4")
desc = section.find("p")
link = section.find("a", href=re.compile("page=person"))
person["name"] = h2.get_text(strip=True) if h2 else None
person["title"] = h3.get_text(strip=True) if h3 else None
person["location"] = h4.get_text(strip=True) if h4 else None
person["description"] = desc.get_text(strip=True) if desc else None
if link:
person["profile_url"] = BASE + link["href"]
people.append(person)
# Optional: extract coordinates from JS var locations
js_match = re.search(r"var locations\s*=\s*(\[.*?\]);", html, re.S)
if js_match:
try:
import ast
coords = ast.literal_eval(js_match.group(1))
for i, loc in enumerate(coords):
if i < len(people):
people[i]["latitude"] = loc[0]
people[i]["longitude"] = loc[1]
except Exception:
pass
return people
def extract_pagination(html):
"""Return list of all page numbers."""
soup = BeautifulSoup(html, "html.parser")
pages = []
for a in soup.select("ul.pagination a"):
m = re.search(r"pageno=(\d+)", a["href"])
if m:
pages.append(int(m.group(1)))
return sorted(set(pages))
def extract_service_code(url):
"""Extract service code from URL - get the LAST servicecode parameter."""
# Find all servicecode occurrences
matches = re.findall(r'servicecode=(\d+)', url)
if matches:
# Return the LAST service code found (the actual service, not the parent category)
return matches[-1]
return None
def scrape_listing(service_name, service_code, total_services, delay=0.5):
"""Scrape all pages for a given service code."""
global progress_counter
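    # The hard-coded countryid/region/suburb parameters pin the search to a
    # single country with no regional filtering; only the service code and
    # page number vary between requests.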
base_url = f"{BASE}index.php?page=listing&servicecode={service_code}&countryid=1®ioncode=0&subregioncode=0&suburbcode=0"
with progress_lock:
progress_counter += 1
current = progress_counter
print(f"\n[{current}/{total_services}] π Scraping {service_name} (code: {service_code})...")
try:
r = requests.get(base_url, headers=HEADERS, timeout=30)
if r.status_code != 200:
print(f" β Failed to fetch (status {r.status_code})")
return service_name, []
html = r.text
all_people = extract_people(html)
pages = extract_pagination(html)
if not all_people and not pages:
print(f" βΉοΈ No results found")
return service_name, []
print(f" β Found {len(all_people)} results on page 1")
for p in pages[1:]: # skip first page
url = f"{base_url}&pageno={p}"
time.sleep(delay)
print(f" π Scraping page {p}...")
r = requests.get(url, headers=HEADERS, timeout=30)
if r.status_code == 200:
page_people = extract_people(r.text)
all_people.extend(page_people)
print(f" β Found {len(page_people)} results")
print(f" β
Total: {len(all_people)} results for {service_name}")
return service_name, all_people
except Exception as e:
print(f" β Error: {str(e)}")
return service_name, []
def load_services_from_csv(csv_file):
"""Load service names and codes from CSV file."""
services = []
with open(csv_file, 'r', encoding='utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
name = row['Name']
url = row['Full URL']
service_code = extract_service_code(url)
if service_code:
services.append({
'name': name,
'code': service_code
})
else:
print(f"β οΈ Could not extract service code from {name}: {url}")
return services
def main():
"""Main scraping logic with parallel processing."""
global progress_counter
progress_counter = 0
print("=" * 70)
print("π Starting Medpages Scraper (Parallel Processing)")
print("=" * 70)
# Detect CPU cores
cpu_count = os.cpu_count() or 4
max_workers = min(cpu_count * 2, 16) # Use 2x CPU cores, max 16 threads
print(f"π» System Info:")
print(f" - CPU Cores: {cpu_count}")
print(f" - Worker Threads: {max_workers}")
# Load services from both CSV files
print("\nπ Loading services from CSV files...")
services_full = load_services_from_csv("medpages_full_links.csv")
services_mental = load_services_from_csv("medpages_mental_health.csv")
# Combine and deduplicate services
all_services = {}
for service in services_full + services_mental:
# Use service code as key to avoid duplicates
all_services[service['code']] = service['name']
print(f"β Loaded {len(all_services)} unique services")
# Scrape all services in parallel
all_data = {}
total_services = len(all_services)
print(f"\nπ Starting parallel scraping with {max_workers} workers...")
print("=" * 70)
start_time = time.time()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
# Submit all tasks
future_to_service = {
executor.submit(scrape_listing, service_name, service_code, total_services):
(service_code, service_name)
for service_code, service_name in all_services.items()
}
# Process completed tasks
for future in as_completed(future_to_service):
service_code, service_name = future_to_service[future]
try:
result_name, people = future.result()
if people:
all_data[result_name] = people
except Exception as e:
print(f" β Exception for {service_name}: {str(e)}")
elapsed_time = time.time() - start_time
# Save to JSON
output_file = "medpages_all_data.json"
print(f"\n{'=' * 70}")
print(f"πΎ Saving data to {output_file}...")
with open(output_file, "w", encoding="utf-8") as f:
json.dump(all_data, f, indent=2, ensure_ascii=False)
# Print summary
total_people = sum(len(people) for people in all_data.values())
print(f"β
Done!")
print(f"π Summary:")
print(f" - Services scraped: {len(all_data)}")
print(f" - Total professionals: {total_people}")
print(f" - Time elapsed: {elapsed_time:.2f} seconds")
print(f" - Average time per service: {elapsed_time/total_services:.2f} seconds")
print(f" - Output file: {output_file}")
print("=" * 70)
if __name__ == "__main__":
main()
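# Typical invocation (assuming medpages_full_links.csv and
# medpages_mental_health.csv sit in the working directory):
#   python scrapeAll.py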