Skip to content

Commit 2363f76

Browse files
committed
chore: lint
1 parent 464b820 commit 2363f76

File tree

5 files changed

+112
-106
lines changed

5 files changed

+112
-106
lines changed

Makefile

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -47,9 +47,9 @@ tests:
4747
poetry run pytest ${args}
4848

4949
lint:
50-
poetry run autoflake --recursive --in-place --remove-all-unused-imports --remove-unused-variables query
51-
poetry run isort --profile black query
52-
poetry run black query
50+
poetry run autoflake --recursive --in-place --remove-all-unused-imports --remove-unused-variables query scripts
51+
poetry run isort --profile black query scripts
52+
poetry run black query scripts
5353

5454
# Refresh the countries.json file from the ProductOwner taxonomy
5555
refresh_countries:

query/tables/product_tags.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2,7 +2,7 @@
22
The order of tags is not preserved"""
33

44
from ..database import create_record, get_rows_affected
5-
from .product_tags_list import COUNTRIES_TAG, TAG_TABLES
5+
from .product_tags_list import TAG_TABLES, tag_tables_v1
66

77

88
async def create_tables(transaction, tag_tables):

query/tables/product_tags_list.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,5 @@
1-
"""List of tags tables
2-
"""
1+
"""List of tags tables"""
2+
33
# we keep it in this simple module to enable easy import eg. for export script.
44

55
COUNTRIES_TAG = "countries_tags"
@@ -70,6 +70,6 @@
7070
"weighers_tags": "product_weighers_tag",
7171
}
7272

73-
# Append additional tag tables to this list when we introduce them
73+
# Append additional tag tables to this list when we introduce them
7474
# and then add a migration to create the new tables
7575
TAG_TABLES = tag_tables_v1

scripts/export_db_sample.py

Lines changed: 59 additions & 55 deletions
Original file line number | Diff line number | Diff line change
@@ -1,13 +1,12 @@
1-
#/usr/bin/env python3
1+
# /usr/bin/env python3
22
import argparse
3-
import sys
43
import datetime as dt
4+
import sys
55
from textwrap import dedent
66

77
sys.path.append(".")
88
from query.tables import product_tags_list
99

10-
1110
DESCRIPTION = """
1211
Generate a script to export some data from openfoodfacts-query database,
1312
starting from the event table.
@@ -26,24 +25,24 @@
2625

2726
def generate_export_script(from_date, to_date, export_dir):
2827

29-
outputs = []
28+
outputs = []
3029

31-
def cmd(sql):
32-
outputs.append(dedent(sql))
30+
def cmd(sql):
31+
outputs.append(dedent(sql))
3332

34-
cmd(
35-
f"""
33+
cmd(
34+
f"""
3635
\\! mkdir -p {export_dir}
3736
-- enable writing by postgres process
3837
\\! chmod a+rwX {export_dir}
3938
\\set export_dir '{export_dir}'
4039
\\set from_date '{from_date}'
4140
\\set to_date '{to_date}'
4241
"""
43-
)
42+
)
4443

45-
cmd(
46-
"""
44+
cmd(
45+
"""
4746
\\set export_path :export_dir/product_update_event.csv
4847
4948
COPY (
@@ -53,10 +52,10 @@ def cmd(sql):
5352
)
5453
TO :'export_path' DELIMITER ',' CSV HEADER;
5554
"""
56-
)
55+
)
5756

58-
cmd(
59-
"""
57+
cmd(
58+
"""
6059
\\set export_path :export_dir/product_update.csv
6160
6261
COPY (
@@ -68,9 +67,9 @@ def cmd(sql):
6867
)
6968
TO :'export_path' DELIMITER ',' CSV HEADER;
7069
"""
71-
)
72-
cmd(
73-
"""
70+
)
71+
cmd(
72+
"""
7473
\\set export_path :export_dir/product.csv
7574
COPY (
7675
select * from product
@@ -84,11 +83,11 @@ def cmd(sql):
8483
)
8584
TO :'export_path' DELIMITER ',' CSV HEADER;
8685
"""
87-
)
88-
# should use product_tags.TAG_TABLES
89-
for table_name in product_tags_list.TAG_TABLES.values():
90-
cmd(
91-
f"""
86+
)
87+
# should use product_tags.TAG_TABLES
88+
for table_name in product_tags_list.TAG_TABLES.values():
89+
cmd(
90+
f"""
9291
\\set table_name {table_name}
9392
\\set export_path :export_dir/:table_name.csv
9493
@@ -104,44 +103,49 @@ def cmd(sql):
104103
)
105104
TO :'export_path' DELIMITER ',' CSV HEADER;
106105
"""
107-
)
108-
return outputs
106+
)
107+
return outputs
109108

110109

111110
def get_parser():
112-
now = dt.datetime.now()
113-
default_start = (now - dt.timedelta(minutes=20)).strftime("%Y-%m-%d %H:%M:%S")
114-
default_end = now.strftime("%Y-%m-%d %H:%M:%S")
115-
default_folder="/opt/data/exports/" + now.strftime("%Y-%m-%d_%H:%M:%S")
116-
parser = argparse.ArgumentParser(description=DESCRIPTION)
117-
parser.add_argument(
118-
'from_date', default=default_start, type=str, nargs="?",
119-
help=(
120-
"Start date in iso format, e.g. 2025-11-21 11:00:00\n" +
121-
"If empty, it's now - 20 minutes"
111+
now = dt.datetime.now()
112+
default_start = (now - dt.timedelta(minutes=20)).strftime("%Y-%m-%d %H:%M:%S")
113+
default_end = now.strftime("%Y-%m-%d %H:%M:%S")
114+
default_folder = "/opt/data/exports/" + now.strftime("%Y-%m-%d_%H:%M:%S")
115+
parser = argparse.ArgumentParser(description=DESCRIPTION)
116+
parser.add_argument(
117+
"from_date",
118+
default=default_start,
119+
type=str,
120+
nargs="?",
121+
help=(
122+
"Start date in iso format, e.g. 2025-11-21 11:00:00\n"
123+
+ "If empty, it's now - 20 minutes"
124+
),
125+
)
126+
parser.add_argument(
127+
"to_date",
128+
default=default_end,
129+
type=str,
130+
nargs="?",
131+
help=(
132+
"End date in iso format, e.g. 2025-11-21 11:20:00\n" + "If empty, it's now"
133+
),
122134
)
123-
)
124-
parser.add_argument(
125-
'to_date', default=default_end, type=str, nargs="?",
126-
help=(
127-
"End date in iso format, e.g. 2025-11-21 11:20:00\n" +
128-
"If empty, it's now"
135+
parser.add_argument(
136+
"--dest",
137+
default=default_folder,
138+
type=str,
139+
help=(
140+
"Target directory, it will be created if it does not exists,"
141+
+ f"defaults to {default_folder} (according to current time)"
142+
),
129143
)
130-
)
131-
parser.add_argument(
132-
'--dest', default=default_folder, type=str,
133-
help=(
134-
"Target directory, it will be created if it does not exists," +
135-
f"defaults to {default_folder} (according to current time)"
136-
),
137-
)
138-
return parser
144+
return parser
139145

140146

141147
if __name__ == "__main__":
142-
parser = get_parser()
143-
args = parser.parse_args()
144-
outputs = generate_export_script(args.from_date, args.to_date, args.dest)
145-
print("\n".join(outputs))
146-
147-
148+
parser = get_parser()
149+
args = parser.parse_args()
150+
outputs = generate_export_script(args.from_date, args.to_date, args.dest)
151+
print("\n".join(outputs))

scripts/import_db_sample.py

Lines changed: 46 additions & 44 deletions
Original file line number | Diff line number | Diff line change
@@ -1,8 +1,7 @@
1-
#/usr/bin/env python3
1+
# /usr/bin/env python3
22
import argparse
33
import csv
44
import glob
5-
import sys
65
from textwrap import dedent as _d
76

87
DESCRIPTION = """
@@ -18,57 +17,60 @@
1817
It is intended to have data for local development.
1918
"""
2019

20+
2121
def generate_import_script(source_dir, docker_dir):
22-
outputs = []
22+
outputs = []
2323

24-
def cmd(sql):
25-
outputs.append(_d(sql))
24+
def cmd(sql):
25+
outputs.append(_d(sql))
2626

27-
def priority(file_name):
28-
if file_name.endswith("product.csv"):
29-
return 0
30-
if file_name.endswith("product_update.csv"):
31-
return 1
32-
if file_name.endswith("product_update_event.csv"):
33-
return 2
34-
if file_name.endswith("_tag.csv"):
35-
return 10
36-
return 100
27+
def priority(file_name):
28+
if file_name.endswith("product.csv"):
29+
return 0
30+
if file_name.endswith("product_update.csv"):
31+
return 1
32+
if file_name.endswith("product_update_event.csv"):
33+
return 2
34+
if file_name.endswith("_tag.csv"):
35+
return 10
36+
return 100
3737

38-
files = glob.glob(f"{source_dir}/*.csv")
39-
files = sorted(files, key=priority)
38+
files = glob.glob(f"{source_dir}/*.csv")
39+
files = sorted(files, key=priority)
4040

41-
for file in files:
42-
docker_path = file.replace(source_dir, docker_dir)
41+
for file in files:
42+
docker_path = file.replace(source_dir, docker_dir)
4343

44-
table_name = file.split("/")[-1].split(".")[0]
45-
csv_reader = csv.reader(open(file), delimiter=",")
46-
# get column names from csv,
47-
# because they might not have same order
48-
colnames = ",".join(next(csv_reader))
49-
cmd(
50-
f"""
44+
table_name = file.split("/")[-1].split(".")[0]
45+
csv_reader = csv.reader(open(file), delimiter=",")
46+
# get column names from csv,
47+
# because they might not have same order
48+
colnames = ",".join(next(csv_reader))
49+
cmd(
50+
f"""
5151
COPY {table_name} ({colnames}) FROM '{docker_path}' WITH (FORMAT CSV, DELIMITER ',',HEADER MATCH);
5252
"""
53-
)
54-
return outputs
53+
)
54+
return outputs
55+
5556

5657
def get_parser():
57-
parser = argparse.ArgumentParser(description=DESCRIPTION)
58-
parser.add_argument(
59-
"source_dir",
60-
type=str,
61-
help="path to source directory, containing csv, on the host machine"
62-
)
63-
parser.add_argument(
64-
"docker_dir",
65-
type=str,
66-
help="path to source directory, containing csv in the postgres docker container"
67-
)
68-
return parser
58+
parser = argparse.ArgumentParser(description=DESCRIPTION)
59+
parser.add_argument(
60+
"source_dir",
61+
type=str,
62+
help="path to source directory, containing csv, on the host machine",
63+
)
64+
parser.add_argument(
65+
"docker_dir",
66+
type=str,
67+
help="path to source directory, containing csv in the postgres docker container",
68+
)
69+
return parser
70+
6971

7072
if __name__ == "__main__":
71-
parser = get_parser()
72-
args = parser.parse_args()
73-
outputs = generate_import_script(args.source_dir, args.docker_dir)
74-
print("\n".join(outputs))
73+
parser = get_parser()
74+
args = parser.parse_args()
75+
outputs = generate_import_script(args.source_dir, args.docker_dir)
76+
print("\n".join(outputs))

0 commit comments

Comments (0)