1
+ from .save_data import export_to_db , select_from_db , export_to_csv
1
2
from lxml import html
2
3
from lxml import etree
3
4
import finviz .request_functions as send
4
5
import finviz .scraper_functions as scrape
5
-
6
+ from os import getcwd
6
7
7
8
class Screener (object ):
8
9
9
- def __init__ (self , tickers = None , filters = None , rows = None , order = '' , signal = '' , table = 'Overview' ):
10
+ def __init__ (self , tickers = None , filters = None , order = '' , rows = None , signal = '' , table = 'Overview' ):
10
11
11
12
if tickers is None :
12
13
self .tickers = []
@@ -32,15 +33,42 @@ def __init__(self, tickers=None, filters=None, rows=None, order='', signal='', t
32
33
33
34
def to_csv(self, directory=None):
    """Export the screener headers and rows to a CSV file.

    Args:
        directory: Destination directory for the CSV output. When omitted,
            the current working directory is used.
    """
    # Fall back to the process's working directory when no target is given.
    target = getcwd() if directory is None else directory
    export_to_csv(self.headers, self.data, target)
43
40
41
def to_sqlite(self):
    """Persist the screener headers and rows via the save_data module."""
    headers, rows = self.headers, self.data
    export_to_db(headers, rows)
43
+
44
def display_db(self):
    """Show previously stored screener data (delegates to select_from_db)."""
    select_from_db()
46
+
47
def __get_total_rows(self):
    """Parse the total result count from the page and store it in self.rows.

    Locates the fixed-width table cell that holds the "Total" label and
    extracts the integer that follows the closing </b> tag in its markup.
    """
    cell = self.page_content.cssselect('td[width="140"]')[0]
    markup = etree.tostring(cell).decode("utf-8")
    # The count is the first space-delimited token after the closing </b>
    # tag in the serialized cell markup.
    count_token = markup.split('</b>')[1].split(' ')[0]
    self.rows = int(count_token)
51
+
52
def __get_page_urls(self):
    """Build the list of per-page result URLs and store it in self.page_urls.

    Returns None (leaving self.page_urls unset) when the page reports no
    results at all.
    """
    options = self.page_content.cssselect('option[value="1"]')
    try:
        # The pager option text looks like "current/total"; take the total.
        total_pages = int([opt.text.split('/')[1] for opt in options][0])
    except IndexError:  # No results found
        return None

    collected = []
    for page_number in range(1, total_pages + 1):
        # Each results page holds 20 rows; compute the 1-based index of
        # this page's first row.
        first_row = 1 + (page_number - 1) * 20

        # Stop once the total row count falls inside the previous page's
        # window — there is nothing on this page or beyond.
        if first_row - 20 <= self.rows < first_row:
            break
        collected.append(self.url + '&r={}'.format(str(first_row)))

    self.page_urls = collected
71
+
44
72
def __get_table_headers (self ):
45
73
46
74
first_row = self .page_content .cssselect ('tr[valign="middle"]' )
0 commit comments