-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathNews Aggregator.py
More file actions
105 lines (84 loc) Β· 3.79 KB
/
News Aggregator.py
File metadata and controls
105 lines (84 loc) Β· 3.79 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
import time
from bs4 import BeautifulSoup
import requests
from datetime import datetime
'''A simple news-fetching script: pulls headlines from more than one website and prints them together with the current date and time.'''
# News sources: BBC News and The Guardian
class live_news:
    """Fetch and print live US-politics headlines from The Guardian and the BBC.

    Each method performs one HTTP GET, parses the page with BeautifulSoup,
    and prints every matched headline to stdout. Network/HTTP failures
    raise ``requests.RequestException`` for the caller to handle.
    """

    # NOTE(review): the CSS class names below are site-generated and may
    # change without notice — verify against the live pages periodically.

    def d_live_news(self):
        """Print current US-politics headlines scraped from The Guardian."""
        url = 'https://www.theguardian.com/us-news/us-politics'
        # timeout prevents the scraper from blocking forever on a stalled connection
        response = requests.get(url, timeout=10)
        # surface HTTP errors (403/500/...) instead of parsing an error page
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        headlines = soup.find_all('span', class_='show-underline dcr-uyefka')
        print("-----------------------US POLITICS/ theguardian / bbc-----------------------")
        print(f"\n📅 Updated at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        for con in headlines:
            print(f"=> {con.text}")

    def bbc_livenews(self):
        """Print current US & Canada headlines scraped from BBC News."""
        url = 'https://www.bbc.com/news/us-canada'
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        headlines = soup.find_all('div', class_='sc-9d830f2a-0 eKWlJZ')
        print('----------------------------bbc---------------------------')
        print(f"\n📅 Updated at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        for con in headlines:
            print(f"=> {con.text}")
class sports:
    """Fetch and print sports headlines from The Guardian and the BBC.

    Each method performs one HTTP GET, parses the page with BeautifulSoup,
    and prints every matched headline to stdout. Network/HTTP failures
    raise ``requests.RequestException`` for the caller to handle.
    """

    # NOTE(review): the CSS class names below are site-generated and may
    # change without notice — verify against the live pages periodically.

    def d_sports(self):
        """Print current sports headlines scraped from The Guardian."""
        url = 'https://www.theguardian.com/uk/sport'
        # timeout prevents the scraper from blocking forever on a stalled connection
        response = requests.get(url, timeout=10)
        # surface HTTP errors (403/500/...) instead of parsing an error page
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        headlines = soup.find_all('span', class_='show-underline dcr-uyefka')
        print("-----------------------SPORTS theguardian / bbc-----------------------")
        print(f"\n📅 Updated at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        for con in headlines:
            print(f"=> {con.text}")

    def bbc_sports(self):
        """Print current sports headlines scraped from BBC Sport."""
        url = 'https://www.bbc.com/sport'
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        headlines = soup.find_all('p', class_='ssrcss-1b1mki6-PromoHeadline exn3ah910')
        print('----------------------------bbc---------------------------')
        print(f"\n📅 Updated at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        for con in headlines:
            print(f"=> {con.text}")
class culture:
    """Fetch and print culture headlines from The Guardian and the BBC.

    Each method performs one HTTP GET, parses the page with BeautifulSoup,
    and prints every matched headline to stdout. Network/HTTP failures
    raise ``requests.RequestException`` for the caller to handle.
    """

    # NOTE(review): the CSS class names below are site-generated and may
    # change without notice — verify against the live pages periodically.

    def d_culture(self):
        """Print current culture headlines scraped from The Guardian."""
        url = 'https://www.theguardian.com/uk/culture'
        # timeout prevents the scraper from blocking forever on a stalled connection
        response = requests.get(url, timeout=10)
        # surface HTTP errors (403/500/...) instead of parsing an error page
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        headlines = soup.find_all('span', class_='show-underline dcr-uyefka')
        print("-----------------------CULTURE theguardian / bbc-----------------------")
        print(f"\n📅 Updated at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        for con in headlines:
            print(f"=> {con.text}")

    def bbc_culture(self):
        """Print current culture headlines scraped from BBC Culture."""
        url = 'https://www.bbc.com/culture'
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        headlines = soup.find_all('h2', class_='sc-9d830f2a-3 fWzToZ')
        print('----------------------------bbc---------------------------')
        print(f"\n📅 Updated at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        for con in headlines:
            print(f"=> {con.text}")
# Main loop: headlines are re-fetched continuously; the short sleeps space
# out the requests so the sites are not hammered between categories.
if __name__ == '__main__':
    while True:
        try:
            d_live = live_news()
            d_live.d_live_news()
            time.sleep(4)
            d_live.bbc_livenews()
            s_live = sports()
            time.sleep(4)
            s_live.d_sports()
            time.sleep(4)
            s_live.bbc_sports()
            c_live = culture()
            time.sleep(4)
            c_live.d_culture()
            time.sleep(4)
            c_live.bbc_culture()
            time.sleep(4)
        except requests.RequestException as e:
            print(f"Error {e}")
            # back off before retrying: without this, a fast-failing
            # connection makes the loop hammer the sites with no delay
            time.sleep(4)
        except KeyboardInterrupt:
            # exit cleanly on Ctrl-C instead of printing a traceback
            break