Commit 56b8a79

refactor: PEP8 Conventions Adjusts
1 parent 9518787 commit 56b8a79

9 files changed: +63 -64 lines changed

etl/common/utils/logs.py  (+22 -19)

@@ -1,9 +1,10 @@
 import logging
 import os
 
-LOG_FORMAT='%(asctime)s :: %(levelname)s :: %(message)s'
+LOG_FORMAT = "%(asctime)s :: %(levelname)s :: %(message)s"
 
-def consoleLogger(module):
+
+def ConsoleLogger(module):
     """
     Creates a console logger for logging messages to the console and a log file.
 
@@ -15,29 +16,30 @@ def consoleLogger(module):
     """
     dir_name = f"etl/common/logs/"
     os.makedirs(dir_name, exist_ok=True)
-
+
     with open(dir_name + f"{module}.log", "w") as f:
         f.write("")
-
+
     logging.basicConfig(
         filename=dir_name + f"{module}.log",
         level=logging.DEBUG,
         format=LOG_FORMAT,
-        datefmt='%Y-%m-%d %H:%M:%S'
+        datefmt="%Y-%m-%d %H:%M:%S",
     )
-
+
     consoleLog = logging.getLogger("consoleLogger")
     consoleLog.setLevel(logging.INFO)
     ch = logging.StreamHandler()
     ch.setLevel(logging.INFO)
     formatter = logging.Formatter(LOG_FORMAT)
     ch.setFormatter(formatter)
-
+
     consoleLog.addHandler(ch)
-
+
     return consoleLog
-
-def loggingInfo(msg, module):
+
+
+def logging_info(msg, module):
     """
     Logs an informational message.
 
@@ -51,12 +53,12 @@ def loggingInfo(msg, module):
     if logging.getLogger("consoleLogger").hasHandlers():
         logger = logging.getLogger("consoleLogger")
     else:
-        logger = consoleLogger(module=module)
-
+        logger = ConsoleLogger(module=module)
+
     logger.info(msg=msg)
 
 
-def loggingError(msg, module):
+def logging_error(msg, module):
     """
     Logs an error message.
 
@@ -70,11 +72,12 @@ def loggingError(msg, module):
     if logging.getLogger("consoleLogger").hasHandlers():
         logger = logging.getLogger("consoleLogger")
     else:
-        logger = consoleLogger(module=module)
-
+        logger = ConsoleLogger(module=module)
+
     logger.error(msg=msg)
-
-def loggingWarn(msg,module):
+
+
+def logging_warn(msg, module):
     """
     Logs a warning message.
 
@@ -88,6 +91,6 @@ def loggingWarn(msg,module):
     if logging.getLogger("consoleLogger").hasHandlers():
         logger = logging.getLogger("consoleLogger")
     else:
-        logger = consoleLogger(module=module)
-
+        logger = ConsoleLogger(module=module)
+
     logger.warning(msg=msg)
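For reference, a minimal usage sketch of the renamed helpers after this commit: ConsoleLogger builds the logger, while logging_info / logging_warn / logging_error are the call-site wrappers. The module name "extract" below is made up for illustration; whatever string is passed becomes the log file name under etl/common/logs/.

# Sketch only, not part of this commit.
from etl.common.utils.logs import logging_info, logging_warn, logging_error

module = "extract"  # hypothetical module name; logs land in etl/common/logs/extract.log

logging_info("Extraction started", module)
logging_warn("Param: USD-XYZ is not valid for call", module)
logging_error("DataFrame is empty", module)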

etl/config/logFile.py  (+2 -3)

@@ -1,7 +1,6 @@
 import os
 
 
-def logFileName(file: str) -> str:
+def log_file_name(file: str) -> str:
     current_dir = os.path.dirname(os.path.relpath(file))
-    WORK_DIR = current_dir.split("/")[-1:][0]
-    return WORK_DIR
+    return current_dir.split("/")[-1:][0]
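A quick worked example of what log_file_name returns, assuming the interpreter runs from the repository root so the relative path is preserved (the module path below is one of this repo's own files):

# For file = "etl/models/extract/params_validator.py":
#   os.path.relpath(file)                 -> "etl/models/extract/params_validator.py"
#   os.path.dirname(...)  (current_dir)   -> "etl/models/extract"
#   current_dir.split("/")[-1:][0]        -> "extract"
# i.e. the function returns the name of the folder containing the module.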

etl/models/extract/params_validator.py  (+5 -6)

@@ -1,17 +1,16 @@
 import requests
-from etl.common.utils.logs import loggingWarn
+from etl.common.utils.logs import logging_warn
 from etl.config.datasource import API
-from etl.config.logFile import logFileName
+from etl.config.logFile import log_file_name
 
-WORK_DIR = logFileName(file=__file__)
+WORK_DIR = log_file_name(file=__file__)
 
 
 class ParamsValidator:
     def __init__(self, params: list) -> None:
         self.params = params
-        # self.validParams = self.__ValidParamsForCall__()
 
-    def ValidParamsForCall(self) -> list:
+    def valid_params_for_call(self) -> list:
         """
         Returns a list of valid parameters for the pipeline execution.
 
@@ -26,7 +25,7 @@ def ValidParamsForCall(self) -> list:
             if param in AvaliableList:
                 valParams.append(param)
             else:
-                loggingWarn(f"Param: {param} is not valid for call", WORK_DIR)
+                logging_warn(f"Param: {param} is not valid for call", WORK_DIR)
 
             if valParams:
                 return valParams
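A usage sketch consistent with the tests below: valid pairs are returned, unknown pairs trigger logging_warn, and a list with no valid pair raises KeyError. The pair "FOO-BAR" is made up; the real list of valid parities is fetched from API.ENDPOINT_AVALIABLE_PARITIES at call time, so this needs network access.

# Sketch only, not part of this commit.
from etl.models.extract.params_validator import ParamsValidator

validator = ParamsValidator(["USD-BRL", "FOO-BAR"])
valid = validator.valid_params_for_call()  # -> ["USD-BRL"], plus a warning for "FOO-BAR"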

etl/models/extract/test_api_data_extractor.py  (+6 -5)

@@ -3,12 +3,13 @@
 
 def test_extraction_init():
     params = ["USD-BRL", "USD-BRLT", "CAD-BRL"]
-    ext = extraction(params)
-    assert ext.ValidParams == params
+    extractor = extraction(params)
+    response, valid_params = extractor.run()
+    assert valid_params == params
 
 
 def test_extraction_run_success():
     params = ["USD-BRL", "USD-BRLT", "CAD-BRL"]
-    ext = extraction(params)
-    json_data = ext.__run__(ext.ValidParams)
-    assert isinstance(json_data, dict)
+    extractor = extraction(params)
+    response, valid_params = extractor.run()
+    assert isinstance(response, dict)
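These tests imply the extractor now exposes a single run() method returning a (response, valid_params) tuple instead of the old ValidParams attribute and __run__ method. A sketch of that call; the import path is an assumption based on the test file's name and location:

# Sketch only, based on the tests above.
from etl.models.extract.api_data_extractor import extraction  # assumed module path

extractor = extraction(["USD-BRL", "CAD-BRL"])
response, valid_params = extractor.run()  # response: dict keyed by parity, valid_params: list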

etl/models/extract/test_params_validator.py  (+3 -4)

@@ -1,4 +1,3 @@
-from matplotlib.rcsetup import validate_ps_distiller
 import pytest
 from etl.models.extract.params_validator import ParamsValidator
 
@@ -20,19 +19,19 @@ def mixed_params():
 
 def test_valid_params(valid_params):
     validator = ParamsValidator(valid_params)
-    valid = validator.ValidParamsForCall()
+    valid = validator.valid_params_for_call()
     assert validator.params == valid_params
     assert valid_params == valid
 
 
 def test_invalid_params(invalid_params):
     validator = ParamsValidator(invalid_params)
     with pytest.raises(KeyError):
-        validator.ValidParamsForCall()
+        validator.valid_params_for_call()
 
 
 def test_mixed_params(mixed_params, valid_params):
     validator = ParamsValidator(mixed_params)
-    validated = validator.ValidParamsForCall()
+    validated = validator.valid_params_for_call()
     assert validator.params == mixed_params
     assert validated == valid_params

etl/models/load/parquet_loader.py  (+5 -6)

@@ -1,16 +1,15 @@
-from math import e
 from tqdm import tqdm
 import pandas as pd
 
-from etl.config.logFile import logFileName
-from etl.common.utils.logs import loggingError, loggingInfo
+from etl.config.logFile import log_file_name
+from etl.common.utils.logs import logging_error, logging_info
 from etl.common.utils.common import (
     DefaultTimestampStr,
     DefaultOutputFolder,
     DefaultUTCDatetime,
 )
 
-WORK_DIR = logFileName(file=__file__)
+dir = log_file_name(file=__file__)
 
 
 class load:
@@ -24,7 +23,7 @@ def run(self):
         df = pd.DataFrame([self.dic])
 
         if df.empty:
-            loggingError("DataFrame is empty", WORK_DIR)
+            logging_error("DataFrame is empty", dir)
             raise ValueError("DataFrame is empty")
 
         # Add new columns to the DataFrame
@@ -37,7 +36,7 @@ def run(self):
            try:
                 df.to_parquet(f"{DefaultOutputFolder()}{param}-{ts}.parquet")
             except Exception as e:
-                loggingError(f"Error writing parquet file: {e}", WORK_DIR)
+                logging_error(f"Error writing parquet file: {e}", dir)
 
             # Append list with the file path
             extracted_files.append(f"{param}-{ts}.parquet")
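The write path in load.run() boils down to one parquet file per parity, named <param>-<timestamp>.parquet. A standalone sketch of that pattern with hypothetical stand-ins for the imported helpers ("output/", the timestamp, and the quote dict are made up; DefaultOutputFolder() and DefaultTimestampStr() are the real helpers):

# Sketch only; requires a parquet engine (pyarrow or fastparquet), as pandas' to_parquet does.
import os
import pandas as pd

output_dir = "output/"   # stand-in for DefaultOutputFolder()
ts = "1717171717"        # stand-in for DefaultTimestampStr()
param = "USD-BRL"

os.makedirs(output_dir, exist_ok=True)
df = pd.DataFrame([{"bid": "5.05", "ask": "5.06"}])  # one quote dict per file, as in run()
if df.empty:
    raise ValueError("DataFrame is empty")
df.to_parquet(f"{output_dir}{param}-{ts}.parquet")   # e.g. output/USD-BRL-1717171717.parquet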

etl/models/transform/publisher.py  (+8 -9)

@@ -1,22 +1,21 @@
 import time
+import queue
 from tqdm import tqdm
-from etl.common.utils.logs import loggingInfo
-from etl.config.logFile import logFileName
+from etl.config.logFile import log_file_name
 
-
-WORK_DIR = logFileName(file=__file__)
+WORK_DIR = log_file_name(file=__file__)
 
 
 class transformation:
-    def __init__(self, json_response: dict, params, fila: object):
+    def __init__(self, json_response: dict, params, queue: queue.Queue):
         self.json_response = json_response
-        self.validParams = params
-        self.fila = fila
+        self.valid_params = params
+        self.queue = queue
 
     def publish(self):
         for param in tqdm(
-            self.validParams, total=len(self.validParams), desc="Producing Data"
+            self.valid_params, total=len(self.valid_params), desc="Producing Data"
        ):
             dic = self.json_response[param.replace("-", "")]
             time.sleep(0.2)
-            self.fila.put(dic)  # type: ignore
+            self.queue.put(dic)
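publish() is the producer half of a producer/consumer pair: it pushes one quote dict per parity into the shared queue.Queue. A sketch of how a consumer thread could drain it; the consumer, the sentinel, and the payload below are illustrative, not part of this commit:

# Sketch only.
import queue
import threading

from etl.models.transform.publisher import transformation

json_response = {"USDBRL": {"bid": "5.05"}, "CADBRL": {"bid": "3.70"}}  # made-up payload
q = queue.Queue()

def consume():
    while True:
        item = q.get()
        if item is None:  # sentinel: producer is done
            break
        print(item)

consumer = threading.Thread(target=consume)
consumer.start()

transformation(json_response, ["USD-BRL", "CAD-BRL"], q).publish()
q.put(None)
consumer.join()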

etl/run.py  (+10 -10)

@@ -8,28 +8,28 @@
 start = time.time()
 
 
-def GenerateRandomParams(ParamsQty: int) -> list:
+def generate_random_params(params_qty: int) -> list:
     """
     Generate a list of random parameters from the available list of parities.
 
     Args:
-        ParamsQty (int): The number of random parameters to generate.
+        params_qty (int): The number of random parameters to generate.
 
     Returns:
         list: A list of randomly generated parameters.
     """
-    AvaliableList = list(requests.get(API.ENDPOINT_AVALIABLE_PARITIES).json())
-    min = random.randint(0, len(AvaliableList) - ParamsQty)
-    max = min + ParamsQty
-    if max > len(AvaliableList):
-        max = len(AvaliableList)
-    if ParamsQty == len(AvaliableList):
+    avaliable_list = list(requests.get(API.ENDPOINT_AVALIABLE_PARITIES).json())
+    min = random.randint(0, len(avaliable_list) - params_qty)
+    max = min + params_qty
+    if max > len(avaliable_list):
+        max = len(avaliable_list)
+    if params_qty == len(avaliable_list):
         max -= max
-    return AvaliableList[min: max - 1]
+    return avaliable_list[min : max - 1]
 
 
 def main(total_files: int = 2):
-    NewExec = PipelineExecutor(*GenerateRandomParams(total_files))
+    NewExec = PipelineExecutor(*generate_random_params(total_files))
     NewExec.pipeline_run()
 
 
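Because the returned slice ends at max - 1, the function yields one pair fewer than requested, which is exactly what etl/test_run.py below asserts. A worked example of the arithmetic, assuming the API lists 30 parities and params_qty = 3:

# avaliable_list has 30 entries, params_qty = 3
# min = random.randint(0, 30 - 3)      e.g. min = 10
# max = min + 3                        -> 13 (within bounds, so unchanged)
# return avaliable_list[10 : 13 - 1]   -> avaliable_list[10:12], i.e. 2 items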

etl/test_run.py  (+2 -2)

@@ -1,6 +1,6 @@
 import pytest
 import requests
-from etl.run import GenerateRandomParams
+from etl.run import generate_random_params
 
 
 def test_generate_random_params():
@@ -9,7 +9,7 @@ def test_generate_random_params():
     requests.get = lambda url: MockResponse(mock_response)
 
     params_qty = 3
-    result = GenerateRandomParams(params_qty)
+    result = generate_random_params(params_qty)
 
     assert isinstance(result, list)
     assert len(result) == params_qty - 1

0 commit comments