
Commit 6aa2e7e

Adding a scrapy spider to get the data for some financial terms from wikipedia
1 parent 86135ab commit 6aa2e7e

8 files changed: +274 -0 lines changed

data/textdata/scrapy.cfg

+11
@@ -0,0 +1,11 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.org/en/latest/deploy.html

[settings]
default = textdata.settings

[deploy]
#url = http://localhost:6800/
project = textdata

data/textdata/textdata/__init__.py

Whitespace-only changes.

data/textdata/textdata/items.py

+14
@@ -0,0 +1,14 @@
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class TextdataItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
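
The item class above is the unmodified startproject template; the spider in this commit writes text straight to a file instead of yielding items. If items were used, fields could be declared as in this hypothetical sketch (the title and text field names are illustrative, not part of the commit):

import scrapy


class TextdataItem(scrapy.Item):
    # Hypothetical fields for the scraped Wikipedia text (not defined in this commit)
    title = scrapy.Field()   # page title
    text = scrapy.Field()    # cleaned paragraph text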

data/textdata/textdata/middlewares.py

+56
@@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class TextdataSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
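
The middleware above is the unchanged template and stays inactive until it is listed in SPIDER_MIDDLEWARES. A minimal sketch of enabling it, mirroring the commented-out block in settings.py below:

# settings.py (sketch): enable the template spider middleware
SPIDER_MIDDLEWARES = {
    'textdata.middlewares.TextdataSpiderMiddleware': 543,  # 543 is the order value used in the template
}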

data/textdata/textdata/pipelines.py

+11
@@ -0,0 +1,11 @@
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html


class TextdataPipeline(object):
    def process_item(self, item, spider):
        return item
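
The pipeline is the default pass-through and is not enabled by this commit; the spider appends its output to wiki_data.txt itself. As an alternative sketch (not part of the commit, and assuming a hypothetical item with a 'text' field), the file writing could instead live in a pipeline registered via ITEM_PIPELINES:

class TextFilePipeline(object):
    # Hypothetical pipeline: append each item's cleaned text to wiki_data.txt
    def open_spider(self, spider):
        self.f = open('wiki_data.txt', 'a')

    def close_spider(self, spider):
        self.f.close()

    def process_item(self, item, spider):
        self.f.write(item['text'] + '\n')
        return item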

data/textdata/textdata/settings.py

+90
@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-

# Scrapy settings for textdata project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'textdata'

SPIDER_MODULES = ['textdata.spiders']
NEWSPIDER_MODULE = 'textdata.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'textdata (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests to the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'textdata.middlewares.TextdataSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'textdata.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'textdata.pipelines.TextdataPipeline': 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
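
Only BOT_NAME, SPIDER_MODULES/NEWSPIDER_MODULE and ROBOTSTXT_OBEY are active above; the rest is the commented-out template. A hedged sketch of values one might uncomment to crawl Wikipedia more politely (the numbers are illustrative, not from the commit):

# Illustrative additions to settings.py, not part of this commit
DOWNLOAD_DELAY = 1                  # wait about a second between requests to the same domain
AUTOTHROTTLE_ENABLED = True         # let Scrapy adapt the delay to server response times
CONCURRENT_REQUESTS_PER_DOMAIN = 4  # keep per-domain concurrency modest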

data/textdata/textdata/spiders/__init__.py

+4
@@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.

@@ -0,0 +1,88 @@
import scrapy
from scrapy.spiders import CrawlSpider
from w3lib.html import remove_tags, remove_tags_with_content


class WikipediaSpider(CrawlSpider):
    name = 'wikipedia'
    start_urls = ['https://en.wikipedia.org/wiki/Outline_of_finance']

    def parse(self, response):
        """
        Parse the response page
        """
        url = response.url

        if url in WikipediaSpider.start_urls:
            return self._parse_topic_list(response)
        else:
            return self._parse_topic_response(response)

    def _parse_topic_response(self, response):
        """
        Parse the content of a single topic page
        """

        # Get the title first
        title = response.css('title::text').extract_first()

        # Replace / with a space - it creates issues when writing to a file
        title = title.replace('/', ' ')

        content = response.css('div#mw-content-text')

        # Extract all the '<p></p>' descendants of the content div
        text = ''
        for child in content.xpath('.//p'):

            # Get the markup of this <p></p> tag
            paragraph = child.extract()

            # Remove <script>, <sup> and <math> tags together with their content
            paragraph = remove_tags_with_content(paragraph, which_ones=('script', 'sup', 'math'))
            # Remove the remaining tags but keep their content
            paragraph = remove_tags(paragraph)

            # Replace '&amp;' with '&'
            paragraph = paragraph.replace('&amp;', '&')

            # Replace 'U.S.' with 'US'
            paragraph = paragraph.replace('U.S.', 'US')

            # Some more replacements to improve the default tokenization
            # (note: the trailing 'nnn' also pads every letter 'n')
            for c in '();.,[]"\'-:/%$+nnn':
                paragraph = paragraph.replace(c, ' {} '.format(c))

            # Accumulate the cleaned paragraph
            text += paragraph + '\n'

        # Append the cleaned text to the output file
        filename = 'wiki_data.txt'
        with open(filename, 'a') as f:
            f.write(text)

    def _parse_topic_list(self, response):
        """
        Parse the various topics from the list of topics
        """

        # All of the links on this page are in bullet points,
        # so extract the 'ul' tags to get the lists
        content = response.css('div#mw-content-text')
        lists = content.css('ul')

        # Iterate through each list
        for ul in lists:

            # Iterate through each list item
            for li in ul.css('li'):
                # Extract the URL
                url = li.css('a::attr(href)').extract_first()

                # Skip external links as well as links to the same page (e.g. the TOC)
                if url is None or 'wiki' not in url:
                    continue

                next_page = response.urljoin(url)
                yield scrapy.Request(next_page, callback=self.parse)
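
The spider appends its output to wiki_data.txt in the working directory. It would normally be started with `scrapy crawl wikipedia` from the directory containing scrapy.cfg; a programmatic sketch using Scrapy's CrawlerProcess (the run.py name is illustrative, not part of the commit) would look like:

# run.py (sketch): run the wikipedia spider from inside the project
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())  # loads textdata.settings via scrapy.cfg
process.crawl('wikipedia')                        # spider is looked up by its name attribute
process.start()                                   # blocks until the crawl finishes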
