Add ruff flake8-bugbear rules #1325

Merged: 25 commits, Jun 26, 2025

Commits
8e081fc  remove xfail_value_error definition since it was only used once (bmos, Mar 1, 2025)
681ce52  annotate ruff rules in pyproject.toml (bmos, Mar 1, 2025)
98332db  include all pycodestyle errors except line-too-long (E501) -- conflic… (bmos, Mar 1, 2025)
c3f3e9f  add flake8-tidy-imports (TID) (bmos, Mar 1, 2025)
49ac2ef  add flake8-import-conventions (ICN) (bmos, Mar 1, 2025)
9335c0e  add flake8-quotes (Q) (bmos, Mar 1, 2025)
1dec870  add flake8-type-checking (TC) (bmos, Mar 1, 2025)
81d94fb  Add flake8-2020 (YTT) rules (bmos, Mar 1, 2025)
6650d55  Add flake8-bugbear (B) (bmos, Mar 1, 2025)
82c0958  Merge branch 'main' into flake8-bugbear (bmos, Mar 11, 2025)
1014aac  Merge branch 'move-coop:main' into flake8-bugbear (bmos, Mar 20, 2025)
5a40c82  Merge branch 'main' into flake8-bugbear (bmos, Mar 30, 2025)
cdac784  Merge branch 'main' into flake8-bugbear (bmos, Apr 5, 2025)
e32f6af  additional bugbear fixes after merging main (bmos, Apr 5, 2025)
71e87c2  Merge branch 'main' into flake8-bugbear (bmos, Apr 25, 2025)
ce3cc9c  Merge branch 'main' into flake8-bugbear (bmos, Apr 25, 2025)
f996c8e  Merge branch 'main' into flake8-bugbear (bmos, May 2, 2025)
d9ce485  Merge branch 'main' into flake8-bugbear (bmos, May 11, 2025)
0fa783a  Merge branch 'main' into flake8-bugbear (bmos, May 25, 2025)
3bcb2f4  Merge branch 'main' into flake8-bugbear (bmos, May 31, 2025)
b69e0a2  Merge branch 'main' into flake8-bugbear (bmos, Jun 2, 2025)
569e969  Merge branch 'main' into flake8-bugbear (bmos, Jun 10, 2025)
2ce64e5  Merge branch 'main' into flake8-bugbear (bmos, Jun 26, 2025)
823d3fc  update success_codes docstrings (bmos, Jun 26, 2025)
2515cff  fix docstrings again (bmos, Jun 26, 2025)
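
Taken together, the commits above turn on ruff's flake8-bugbear (B) rules plus flake8-tidy-imports (TID), flake8-import-conventions (ICN), flake8-quotes (Q), flake8-type-checking (TC), flake8-2020 (YTT), and the pycodestyle errors except line-too-long (E501). The page does not show the resulting pyproject.toml hunk, so the following is only a hedged sketch of what such a ruff selection typically looks like, not the PR's actual configuration:

    # Hypothetical sketch; the PR's real pyproject.toml hunk is not shown on this page.
    [tool.ruff.lint]
    select = [
        "B",    # flake8-bugbear
        "E",    # pycodestyle errors
        "ICN",  # flake8-import-conventions
        "Q",    # flake8-quotes
        "TC",   # flake8-type-checking
        "TID",  # flake8-tidy-imports
        "YTT",  # flake8-2020
    ]
    ignore = ["E501"]  # line-too-long, excluded per the commit message

Files changed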
2 changes: 1 addition & 1 deletion docs/conf.py
@@ -112,7 +112,7 @@
 html_sidebars = {"**": ["versions.html"]}

 try:
-    html_context
+    html_context  # noqa: B018
 except NameError:
     html_context = dict()
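
The bare `html_context` statement trips B018 (useless expression), which flags expression statements whose value is discarded. Here the bare name is a deliberate existence probe, raising NameError when unbound, so the PR silences the rule with `noqa` rather than rewriting the idiom. A minimal sketch of what B018 normally catches, using hypothetical names:

    DEBUG = False

    def check():
        DEBUG  # B018: bare name, result discarded; likely a typo for a call or assignment

    # The conf.py idiom is intentional, hence the suppression:
    try:
        html_context  # noqa: B018  (raises NameError if the name is unbound)
    except NameError:
        html_context = dict()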
4 changes: 2 additions & 2 deletions parsons/action_network/action_network.py
@@ -1254,7 +1254,7 @@ def upsert_person(
             mobile_numbers_field = [{"number": str(mobile_number)}]
         elif isinstance(mobile_number, list):
             if len(mobile_number) > 1:
-                raise ("Action Network allows only 1 phone number per activist")
+                raise Exception("Action Network allows only 1 phone number per activist")
             if isinstance(mobile_number[0], list):
                 mobile_numbers_field = [
                     {"number": re.sub("[^0-9]", "", cell)} for cell in mobile_number
@@ -1276,7 +1276,7 @@ def upsert_person(
             mobile_numbers_field = mobile_number

         if not email_addresses_field and not mobile_numbers_field:
-            raise (
+            raise Exception(
                 "Either email_address or mobile_number is required and can be formatted "
                 "as a string, list of strings, a dictionary, a list of dictionaries, or "
                 "(for mobile_number only) an integer or list of integers"
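
This one is a runtime bug fix, not just style: parentheses around a string do not create an exception, so the old code raised a plain `str`, which Python rejects on the spot and replaces the intended message with a TypeError. A quick illustration of both behaviors:

    >>> raise ("only one phone number per activist")
    Traceback (most recent call last):
      ...
    TypeError: exceptions must derive from BaseException

    >>> raise Exception("only one phone number per activist")
    Traceback (most recent call last):
      ...
    Exception: only one phone number per activist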
8 changes: 6 additions & 2 deletions parsons/auth0/auth0.py
@@ -82,8 +82,8 @@ def upsert_user(
         username=None,
         given_name=None,
         family_name=None,
-        app_metadata={},
-        user_metadata={},
+        app_metadata=None,
+        user_metadata=None,
         connection="Username-Password-Authentication",
     ):
         """
@@ -106,6 +106,10 @@
             Requests Response object
         """

+        if user_metadata is None:
+            user_metadata = {}
+        if app_metadata is None:
+            app_metadata = {}
         obj = {
             "email": email.lower(),
             "username": username,
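
The old `app_metadata={}` / `user_metadata={}` defaults trip B006 (mutable argument default): defaults are evaluated once, at function definition, so every call that omits the argument shares the same dict and mutations leak between calls. The None-sentinel pattern in the diff is the standard fix. A minimal sketch of the pitfall, with hypothetical names:

    def tag_user(name, metadata={}):  # B006: one dict is created at def time
        metadata.setdefault("tags", []).append(name)
        return metadata

    tag_user("a")  # {'tags': ['a']}
    tag_user("b")  # {'tags': ['a', 'b']}  <- state leaked from the first call

    def tag_user_fixed(name, metadata=None):  # the fix used throughout this PR
        if metadata is None:
            metadata = {}
        metadata.setdefault("tags", []).append(name)
        return metadata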
8 changes: 6 additions & 2 deletions parsons/aws/aws_async.py
@@ -46,8 +46,8 @@ def event_command(event, context):

 def run(
     func,
-    args=[],
-    kwargs={},
+    args=None,
+    kwargs=None,
     service="lambda",
     capture_response=False,
     remote_aws_lambda_function_name=None,
@@ -56,6 +56,10 @@ def run(
     func_class_init_kwargs=None,
     **task_kwargs,
 ):
+    if kwargs is None:
+        kwargs = {}
+    if args is None:
+        args = []
     lambda_function_name = remote_aws_lambda_function_name or os.environ.get(
         "AWS_LAMBDA_FUNCTION_NAME"
     )
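
The same B006 fix as in auth0.py above: the shared `args=[]` / `kwargs={}` defaults become None sentinels that are replaced with fresh containers on each call.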
2 changes: 1 addition & 1 deletion parsons/box/box.py
@@ -399,6 +399,6 @@ def get_item_id(self, path, base_folder_id=DEFAULT_FOLDER_ID) -> str:
         # recursion, just pass it on up.
         except ValueError as e:
             if base_folder_id == DEFAULT_FOLDER_ID:
-                raise ValueError(f'{e}: "{path}"')
+                raise ValueError(f'{e}: "{path}"') from e
             else:
                 raise
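
Appending `from e` satisfies B904 (raise without `from` inside `except`): re-raising a new exception inside an `except` block without explicit chaining makes Python print the misleading implicit chain ("During handling of the above exception, another exception occurred"), whereas `raise ... from e` marks the new error as a deliberate translation and keeps the original traceback on `__cause__`. A minimal sketch with hypothetical names:

    def parse_port(raw):
        try:
            return int(raw)
        except ValueError as e:
            # `from e` chains the original traceback explicitly (B904);
            # `from None` would suppress it instead.
            raise ValueError(f"invalid port: {raw!r}") from e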
4 changes: 2 additions & 2 deletions parsons/copper/copper.py
@@ -284,8 +284,8 @@ def process_json(self, json_blob, obj_type, tidy=False):
         for column in list_cols:
             # Check for nested data
             list_rows = obj_table.select_rows(
-                lambda row: isinstance(row[column], list)
-                and any(isinstance(x, dict) for x in row[column])
+                lambda row: isinstance(row[column], list)  # noqa: B023
+                and any(isinstance(x, dict) for x in row[column])  # noqa: B023
             )
             # Add separate long table for each column with nested data
             if list_rows.num_rows > 0:
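
The `noqa: B023` comments acknowledge bugbear's function-uses-loop-variable rule: a closure built in a loop looks `column` up when it is called, not when it is defined, so a deferred closure would see the final loop value. Suppression appears safe here because each lambda is consumed within its own iteration (`list_rows.num_rows` forces the selection immediately below). The pitfall itself, in a hypothetical example:

    callbacks = [lambda: tag for tag in ["a", "b", "c"]]
    print([cb() for cb in callbacks])  # ['c', 'c', 'c']: every closure sees the final value

    # Fix: freeze the current value with a default argument, as
    # google_bigquery.py does further down with `lambda row, field=field: ...`
    callbacks = [lambda tag=tag: tag for tag in ["a", "b", "c"]]
    print([cb() for cb in callbacks])  # ['a', 'b', 'c']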
2 changes: 1 addition & 1 deletion parsons/databases/alchemy.py
@@ -77,6 +77,6 @@ def split_table_name(full_table_name):
         schema, table = full_table_name.split(".")
     except ValueError as e:
         if "too many values to unpack" in str(e):
-            raise ValueError(f"Invalid database table {full_table_name}")
+            raise ValueError(f"Invalid database table {full_table_name}") from e

     return schema, table
2 changes: 1 addition & 1 deletion parsons/databases/postgres/postgres_create_statement.py
@@ -108,7 +108,7 @@ def generate_data_types(self, table):
         cont = petl.records(table.table)

         # Populate empty values for the columns
-        for col in table.columns:
+        for _col in table.columns:
             longest.append(0)
             type_list.append("")
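
Renaming `col` to `_col` addresses B007 (unused loop control variable): the loop only runs once per column and never reads the variable, and the underscore prefix documents that this is intentional. The identical change lands in rs_create_table.py just below.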
2 changes: 1 addition & 1 deletion parsons/databases/redshift/rs_create_table.py
@@ -107,7 +107,7 @@ def generate_data_types(self, table):
         cont = petl.records(table.table)

         # Populate empty values for the columns
-        for col in table.columns:
+        for _col in table.columns:
             longest.append(0)
             type_list.append("")
2 changes: 1 addition & 1 deletion parsons/databases/redshift/rs_table_utilities.py
@@ -755,7 +755,7 @@ def split_full_table_name(full_table_name):
         schema, table = full_table_name.split(".")
     except ValueError as e:
         if "too many values to unpack" in str(e):
-            raise ValueError(f"Invalid Redshift table {full_table_name}")
+            raise ValueError(f"Invalid Redshift table {full_table_name}") from e

     return schema, table
2 changes: 1 addition & 1 deletion parsons/etl/table.py
@@ -266,7 +266,7 @@ def is_valid_table(self):
             return False

         try:
-            self.columns
+            self.columns  # noqa: B018
         except StopIteration:
             return False
4 changes: 3 additions & 1 deletion parsons/formstack/formstack.py
@@ -29,7 +29,7 @@ def __init__(self, api_token: Optional[str] = None):
         self.client = APIConnector(API_URI, headers=headers)

     def _get_paginated_request(
-        self, url: str, data_key: str, params: dict = {}, large_request: bool = False
+        self, url: str, data_key: str, params: dict = None, large_request: bool = False
     ) -> Table:
         """
         Make a GET request for any endpoint that returns a list of data. Will check pagination.
@@ -67,6 +67,8 @@ def _get_paginated_request(
             Table Class
             A table with the returned data.
         """
+        if params is None:
+            params = {}
         data = Table()
         page = 1
         pages = None
8 changes: 5 additions & 3 deletions parsons/github/github.py
@@ -41,10 +41,10 @@ def wrap_github_404(func):
     def _wrapped_func(*args, **kwargs):
         try:
             return (func)(*args, **kwargs)
-        except UnknownObjectException:
+        except UnknownObjectException as e:
             raise ParsonsGitHubError(
                 "Couldn't find the object you referenced, maybe you need to log in?"
-            )
+            ) from e

     return _wrapped_func

@@ -223,7 +223,7 @@ def list_repo_issues(
         assignee=None,
         creator=None,
         mentioned=None,
-        labels=[],
+        labels=None,
         sort="created",
         direction="desc",
         since=None,
@@ -262,6 +262,8 @@ def list_repo_issues(
             Table with page of repo issues
         """

+        if labels is None:
+            labels = []
         logger.info(f"Listing page {page} of issues for repo {repo_name}")

         kwargs_dict = {"state": state, "sort": sort, "direction": direction}
24 changes: 14 additions & 10 deletions parsons/google/google_bigquery.py
@@ -155,15 +155,17 @@ def __init__(
         app_creds: Optional[Union[str, dict, Credentials]] = None,
         project=None,
         location=None,
-        client_options: dict = {
-            "scopes": [
-                "https://www.googleapis.com/auth/drive",
-                "https://www.googleapis.com/auth/bigquery",
-                "https://www.googleapis.com/auth/cloud-platform",
-            ]
-        },
+        client_options: dict = None,
         tmp_gcs_bucket: Optional[str] = None,
     ):
+        if client_options is None:
+            client_options = {
+                "scopes": [
+                    "https://www.googleapis.com/auth/drive",
+                    "https://www.googleapis.com/auth/bigquery",
+                    "https://www.googleapis.com/auth/cloud-platform",
+                ]
+            }
         self.app_creds = app_creds

         if isinstance(app_creds, Credentials):
@@ -974,7 +976,7 @@ def _stringify_records(self, tbl):
             if "dict" in field["type"] or "list" in field["type"]:
                 new_petl = tbl.table.addfield(
                     field["name"] + "_replace",
-                    lambda row: json.dumps(row[field["name"]]),
+                    lambda row, field=field: json.dumps(row[field["name"]]),
                 )
                 new_tbl = Table(new_petl)
                 new_tbl.remove_column(field["name"])
@@ -1034,8 +1036,10 @@ def _prepare_local_upload_job(
         for column in tbl.columns:
             try:
                 schema_row = [i for i in job_config.schema if i.name.lower() == column.lower()][0]
-            except IndexError:
-                raise IndexError(f"Column found in Table that was not found in schema: {column}")
+            except IndexError as e:
+                raise IndexError(
+                    f"Column found in Table that was not found in schema: {column}"
+                ) from e
             schema.append(schema_row)
         job_config.schema = schema
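
Three bugbear fixes meet in this file: the scopes dict moves out of the signature behind a None sentinel (B006), the `_stringify_records` lambda pins the current `field` with a `field=field` default rather than a `noqa` (the proper B023 fix mentioned above), and the re-raised IndexError is chained with `from e` (B904).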
6 changes: 3 additions & 3 deletions parsons/google/google_sheets.py
@@ -57,8 +57,8 @@ def _get_worksheet(self, spreadsheet_id, worksheet=0):
             idx = self.list_worksheets(spreadsheet_id).index(worksheet)
             try:
                 return self.gspread_client.open_by_key(spreadsheet_id).get_worksheet(idx)
-            except:  # noqa: E722
-                raise ValueError(f"Couldn't find worksheet {worksheet}")
+            except Exception as e:  # noqa: E722
+                raise ValueError(f"Couldn't find worksheet {worksheet}") from e

         else:
             raise ValueError(f"Couldn't find worksheet index or title {worksheet}")
@@ -288,7 +288,7 @@ def append_to_sheet(

         cells = []
         for row_num, row in enumerate(table.data):
-            for col_num, cell in enumerate(row):
+            for col_num, _cell in enumerate(row):
                 # Add 2 to allow for the header row, and for google sheets indexing starting at 1
                 sheet_row_num = existing_table.num_rows + row_num + 2
                 cells.append(gspread.Cell(sheet_row_num, col_num + 1, row[col_num]))
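
Besides the `_cell` rename (B007), the bare `except:` becomes `except Exception as e` with explicit `from e` chaining, narrowing the catch and preserving the original traceback.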