# scraper.py
import re
import json
from datetime import datetime

import requests
from loguru import logger

import koogle_config


class Scraper:
    def __init__(self):
        self._max_get_response_try_num = koogle_config._max_get_response_try_num
        self._default_cookies = koogle_config._default_cookies
        self._default_headers = koogle_config._default_headers
        self._default_proxies = koogle_config._default_proxies
        self.logger = logger

    def _json_value(self, data, *args, default=None):
        """Safely walk nested dicts/lists by a sequence of keys/indexes."""
        cur = data
        for a in args:
            try:
                if isinstance(a, int):
                    cur = cur[a]
                else:
                    cur = cur.get(a)
            except (IndexError, KeyError, TypeError, AttributeError):
                return default
        return cur
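
    # For example (illustrative values, not taken from a real response):
    #   self._json_value({"cursor": {"resultCount": "1,234"}}, "cursor", "resultCount")  -> "1,234"
    #   self._json_value({"results": []}, "results", 0)                                  -> None (index error falls back to default)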

    def _get_response(self, **kwargs):
        """GET with retries; returns the response body text or raises after the last attempt."""
        _max_get_response_try_num = self._max_get_response_try_num
        response_try_num = 0
        while True:
            response_try_num += 1
            try:
                # Route the request through the configured proxy unless the caller passed one.
                kwargs.setdefault("proxies", {
                    "http": f"http://{self._default_proxies}",
                    "https": f"http://{self._default_proxies}",
                })
                response = requests.get(**kwargs)
            except Exception as e:
                self.logger.info(f"request error, response_try_num:{response_try_num}")
                if response_try_num < _max_get_response_try_num:
                    continue
                raise ConnectionError(f"error: {e}")
            if "Unauthorized access to internal API" in response.text:
                raise ConnectionRefusedError("Unauthorized access to internal API")
            if response.status_code == 200:
                return response.text
            if response_try_num >= _max_get_response_try_num:
                raise ConnectionError(f"error: {response.status_code}//{response.text}")
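
    # Illustrative call (the timeout value here is a placeholder, not from config):
    #   body = self._get_response(url="https://cse.google.com/cse/element/v1", params=params, timeout=10)
    # Returns response.text on HTTP 200; otherwise retries up to _max_get_response_try_num
    # attempts before raising ConnectionError (or ConnectionRefusedError for blocked access).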

    def _parse_data(self, data, parse_type):
        """Parse a raw response body into result rows; meant to be overridden in a subclass."""
        pass
        # parse your data, e.g.:
        # if parse_type == "your parse type":
        #     parsed_data = json.loads(data)
        #     results = self._json_value(parsed_data, "results")
        #     for row in results:
        #         value = self._json_value(row, "key")
        #         parsed_data = {
        #             "key": value,
        #         }
        #         yield parsed_data

    def _generate(self, results):
        """Turn parsed rows into output dicts; meant to be overridden in a subclass."""
        pass

    def get_data_by_keyword(self, start, keyword):
        params = {
            "rsz": "filtered_cse",
            "num": "10",
            "start": start,
            "q": keyword,
            "hl": "ko",
            "source": "gcsc",
            "gss": ".io",
            "cselibv": "827890a761694e44",
            "cx": "010593175421032702917:f7zuzysul9w",
            "safe": "off",
            "cse_tok": "AFW0emxsFhJRYHXpY4-3ZUEgTpcU:1687932436810",
            "exp": "csqr, cc, bf",
            "callback": "google.search.cse.api19585"
        }
        url = "https://cse.google.com/cse/element/v1"
        cookies = self._default_cookies
        headers = self._default_headers
        proxies = {
            "http": f"http://{self._default_proxies}",
            "https": f"http://{self._default_proxies}",
        }
        data = self._get_response(url=url, params=params, headers=headers, cookies=cookies, proxies=proxies)
        results = self._parse_data(data=data, parse_type="get_data_by_keyword")
        yield from self._generate(results)


class NewsHadaScraper(Scraper):
    def __init__(self):
        super().__init__()
        self.last_index = 0
        self.keyword = None

    def _parse_data(self, data, parse_type):
        if parse_type == "get_data_by_keyword":
            # Strip the JSONP wrapper so the payload parses as plain JSON.
            data = data.replace("/*O_o*/\n", "")
            data = data.replace("google.search.cse.api19585(", "")
            data = data.replace(");", "")
            parsed_data = json.loads(data)
            last_index = self._json_value(parsed_data, "cursor", "resultCount")
            last_index = last_index.replace(",", "")
            self.last_index = int(last_index)
            results = self._json_value(parsed_data, "results")
            return results

    def _generate(self, results):
        for row in results:
            publish_date = None
            content = self._json_value(row, "contentNoFormatting")
            # Publication dates appear in the snippet text as "YYYY. M. D."
            match_date_string = re.search(r'\d{4}\. \d{1,2}\. \d{1,2}\.', content)
            if match_date_string:
                publish_date = datetime.strptime(match_date_string.group(), "%Y. %m. %d.").date()
            title = self._json_value(row, "titleNoFormatting")
            title = title.replace(" | GeekNews", "")
            url = self._json_value(row, "url")
            parsed_data = {
                "keyword": self.keyword,
                "title": title,
                "content": content,
                "publish_date": publish_date,
                "url": url
            }
            yield parsed_data

    def get_data_by_keyword(self, start, keyword):
        self.keyword = keyword
        params = {
            "rsz": "filtered_cse",
            "num": "10",
            "start": start,
            "q": keyword,
            "hl": "ko",
            "source": "gcsc",
            "gss": ".io",
            "cselibv": "827890a761694e44",
            "cx": "010593175421032702917:f7zuzysul9w",
            "safe": "off",
            "cse_tok": "AFW0emxsFhJRYHXpY4-3ZUEgTpcU:1687932436810",
            "exp": "csqr, cc, bf",
            "callback": "google.search.cse.api19585"
        }
        url = "https://cse.google.com/cse/element/v1"
        cookies = self._default_cookies
        headers = self._default_headers
        data = self._get_response(url=url, params=params, headers=headers, cookies=cookies, timeout=10)
        results = self._parse_data(data=data, parse_type="get_data_by_keyword")
        yield from self._generate(results)


if __name__ == "__main__":
    scraper = NewsHadaScraper()
    start = 10
    keyword = "k8s"
    for row in scraper.get_data_by_keyword(start=start, keyword=keyword):
        print(row)
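
# For reference, a minimal sketch of the koogle_config module this file imports.
# The real values (cookies, headers, proxy address, retry count) are not part of
# this snapshot, so everything below is a placeholder assumption.
#
# _max_get_response_try_num = 3
# _default_cookies = {}
# _default_headers = {"User-Agent": "Mozilla/5.0"}
# _default_proxies = "127.0.0.1:8080"  # host:port; the scheme is added by Scraper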