
Add ruff flake8-pytest-style rules #1369

Open
bmos wants to merge 23 commits into move-coop:main from the flake8-pytest-style branch
Changes from all commits (23 commits)
8091c8e
add flake8-pytest-style ruff rules
bmos Mar 21, 2025
7b42fe9
Resolve PT022 pytest-useless-yield-fixture
bmos Mar 21, 2025
226a789
Resolve PT006 pytest-parametrize-names-wrong-type
bmos Mar 21, 2025
28dd649
resolve pytest-unittest-assertion (PT009)
bmos Mar 21, 2025
0bb2855
Resolve pytest-unittest-raises-assertion (PT027)
bmos Mar 21, 2025
fab2406
Resolve pytest-extraneous-scope-function (PT003)
bmos Mar 21, 2025
bdeba85
Resolve true-false-comparison (E712)
bmos Mar 21, 2025
230dbfa
Resolve none-comparison (E711)
bmos Mar 21, 2025
6d0f33a
Resolve pytest-composite-assertion (PT018)
bmos Mar 21, 2025
04c073c
resolve pytest-parametrize-values-wrong-type (PT007)
bmos Mar 21, 2025
b377a9e
Resolve pytest-duplicate-parametrize-test-cases (PT014)
bmos Mar 21, 2025
0d13d44
Resolve pytest-raises-with-multiple-statements (PT012)
bmos Mar 21, 2025
58f8002
Resolve pytest-raises-too-broad (PT011)
bmos Mar 21, 2025
7ee1cf3
Merge branch 'move-coop:main' into flake8-pytest-style
bmos Mar 22, 2025
d51f888
Merge branch 'main' into flake8-pytest-style
bmos Mar 30, 2025
2a14f8b
Merge branch 'main' into flake8-pytest-style
bmos Apr 5, 2025
f973789
resolve PT015 Assertion always fails
bmos Apr 5, 2025
ff53821
Merge branch 'main' into flake8-pytest-style
bmos Apr 25, 2025
a6695fe
Merge branch 'main' into flake8-pytest-style
bmos Apr 25, 2025
e135038
use test.utils.mark_live_test rather than unittest.skipIf(...)
bmos Apr 27, 2025
57e18e8
improved test_date_to_timestamp_* testing
bmos Apr 27, 2025
d466f78
Merge branch 'main' into flake8-pytest-style
bmos May 11, 2025
9800437
flake8 pytest fixes for actblue and action builder
bmos May 11, 2025
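Most of these commits mechanically swap unittest idioms for the pytest equivalents the PT rules expect. A minimal before/after sketch with hypothetical code (not taken from the Parsons suite):

import pytest

def parse_port(value: str) -> int:
    # Hypothetical helper so the sketch is self-contained.
    port = int(value)
    if not 0 < port < 65536:
        raise ValueError(f"port out of range: {port}")
    return port

# Before (unittest style, flagged by PT009 and PT027):
#     self.assertEqual(parse_port("8080"), 8080)
#     with self.assertRaises(ValueError):
#         parse_port("99999")

# After (pytest style):
def test_parse_port():
    assert parse_port("8080") == 8080
    with pytest.raises(ValueError, match="out of range"):
        parse_port("99999")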
6 changes: 2 additions & 4 deletions parsons/box/box.py
@@ -241,8 +241,7 @@ def upload_table_to_folder_id(

if format not in self.ALLOWED_FILE_FORMATS:
raise ValueError(
f"Format argument to upload_table() must be in one "
f'of {self.ALLOWED_FILE_FORMATS}; found "{format}"'
f'Format argument to upload_table() must be in one of {self.ALLOWED_FILE_FORMATS}; found "{format}"'
)

# Create a temp directory in which we will let Parsons create a
@@ -320,8 +319,7 @@ def get_table_by_file_id(self, file_id, format="csv") -> Table:
"""
if format not in self.ALLOWED_FILE_FORMATS:
raise ValueError(
f"Format argument to upload_table() must be in one "
f'of {self.ALLOWED_FILE_FORMATS}; found "{format}"'
f'Format argument to upload_table() must be in one of {self.ALLOWED_FILE_FORMATS}; found "{format}"'
)

# Temp file will be around as long as enclosing process is running,
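Several hunks in this PR (here and in capitol_canary.py, discover_database.py, scores.py, and targetsmart_automation.py below) collapse implicitly concatenated string fragments into a single literal, which is viable because E501 (line-too-long) is ignored in pyproject.toml; the targetsmart_automation.py change below also fixes a missing space that the old concatenation hid ("an email" + "address" rendered as "emailaddress"). A rough sketch of the pattern, with a hypothetical module-level constant standing in for the Box class attribute:

ALLOWED_FILE_FORMATS = ["csv", "json", "xlsx"]  # hypothetical stand-in

def check_upload_format(format):
    # Before: two adjacent f-string fragments, implicitly concatenated across lines,
    # where a missing space at the seam is easy to overlook:
    #     f"Format argument to upload_table() must be in one "
    #     f'of {ALLOWED_FILE_FORMATS}; found "{format}"'
    # After: one literal, so the whole message reads on a single line.
    if format not in ALLOWED_FILE_FORMATS:
        raise ValueError(
            f'Format argument to upload_table() must be in one of {ALLOWED_FILE_FORMATS}; found "{format}"'
        )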
12 changes: 4 additions & 8 deletions parsons/capitol_canary/capitol_canary.py
@@ -259,14 +259,12 @@ def create_advocate(

if (sms_optin or sms_optout) and not phone:
raise ValueError(
"When opting an advocate in or out of SMS messages, you must specify a valid "
"phone and one or more campaigns"
"When opting an advocate in or out of SMS messages, you must specify a valid phone and one or more campaigns"
)

if (email_optin or email_optout) and not email:
raise ValueError(
"When opting an advocate in or out of email messages, you must specify a valid "
"email address and one or more campaigns"
"When opting an advocate in or out of email messages, you must specify a valid email address and one or more campaigns"
)

# Align our arguments with the expected parameters for the API
@@ -353,14 +351,12 @@ def update_advocate(
# Validate the passed in arguments
if (sms_optin or sms_optout) and not (phone and campaigns):
raise ValueError(
"When opting an advocate in or out of SMS messages, you must specify a valid "
"phone and one or more campaigns"
"When opting an advocate in or out of SMS messages, you must specify a valid phone and one or more campaigns"
)

if (email_optin or email_optout) and not (email and campaigns):
raise ValueError(
"When opting an advocate in or out of email messages, you must specify a valid "
"email address and one or more campaigns"
"When opting an advocate in or out of email messages, you must specify a valid email address and one or more campaigns"
)

# Align our arguments with the expected parameters for the API
3 changes: 1 addition & 2 deletions parsons/databases/discover_database.py
@@ -55,8 +55,7 @@ def discover_database(
if len(detected) > 1:
if default_connector is None:
raise EnvironmentError(
f"Multiple database configurations detected: {detected}."
" Please specify a default connector."
f"Multiple database configurations detected: {detected}. Please specify a default connector."
)

if isinstance(default_connector, list):
2 changes: 1 addition & 1 deletion parsons/databases/redshift/rs_table_utilities.py
@@ -755,7 +755,7 @@ def split_full_table_name(full_table_name):
schema, table = full_table_name.split(".")
except ValueError as e:
if "too many values to unpack" in str(e):
raise ValueError(f"Invalid Redshift table {full_table_name}")
raise ValueError(f"Invalid Redshift table {full_table_name}") from e

return schema, table

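The from e added above is explicit exception chaining: the original unpacking error is recorded as __cause__ on the re-raised ValueError, so the "too many values to unpack" detail stays in the traceback. A small self-contained illustration with hypothetical values:

try:
    try:
        schema, table = "db.schema.table".split(".")  # three parts -> unpacking fails
    except ValueError as e:
        raise ValueError("Invalid Redshift table db.schema.table") from e
except ValueError as err:
    # With "from e", the original error is preserved as __cause__; without it,
    # err.__cause__ would be None and only the implicit __context__ remains.
    print(repr(err.__cause__))  # ValueError('too many values to unpack (expected 2)')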
2 changes: 1 addition & 1 deletion parsons/donorbox/donorbox.py
@@ -220,6 +220,6 @@ def _date_format_helper(self, date_string):
except ValueError:
continue
raise ValueError(
f"The date you supplied, {date_string}, is not a valid Donorbox format."
f"The date you supplied, {date_string}, is not a valid Donorbox format. "
"Try the following formats: YYYY-mm-dd YYYY/mm/dd YYYYmmdd dd-mm-YYYY"
)
6 changes: 3 additions & 3 deletions parsons/nation_builder/nation_builder.py
@@ -37,7 +37,7 @@ def __init__(self, slug: Optional[str] = None, access_token: Optional[str] = Non
@classmethod
def get_uri(cls, slug: Optional[str]) -> str:
if slug is None:
raise ValueError("slug can't None")
raise ValueError("slug can't be None")

if not isinstance(slug, str):
raise ValueError("slug must be an str")
@@ -50,7 +50,7 @@ def get_uri(cls, slug: Optional[str]) -> str:
@classmethod
def get_auth_headers(cls, access_token: Optional[str]) -> Dict[str, str]:
if access_token is None:
raise ValueError("access_token can't None")
raise ValueError("access_token can't be None")

if not isinstance(access_token, str):
raise ValueError("access_token must be an str")
@@ -133,7 +133,7 @@ def update_person(self, person_id: str, person: Dict[str, Any]) -> Dict[str, Any
A person object with the updated data.
"""
if person_id is None:
raise ValueError("person_id can't None")
raise ValueError("person_id can't be None")

if not isinstance(person_id, str):
raise ValueError("person_id must be a str")
20 changes: 9 additions & 11 deletions parsons/ngpvan/people.py
@@ -387,17 +387,15 @@ def _valid_search(
and None in [firstName, lastName, addressLine1, zipOrPostalCode]
and None in [email]
):
raise ValueError(
"""
Person find must include the following minimum
combinations to conduct a search.
- first_name, last_name, email
- first_name, last_name, phone
- first_name, last_name, zip, dob
- first_name, last_name, street_number, street_name, zip
- email
"""
)
error_msg = """
Person find must include the following minimum combinations to conduct a search.
- first_name, last_name, email
- first_name, last_name, phone
- first_name, last_name, zip, dob
- first_name, last_name, street_number, street_name, zip
- email
"""
raise ValueError(error_msg)

return True

3 changes: 1 addition & 2 deletions parsons/ngpvan/scores.py
@@ -103,8 +103,7 @@ def update_score_status(self, score_update_id, status):

if status not in ["pending approval", "approved", "disapproved", "canceled"]:
raise ValueError(
"""Valid inputs for status are, 'pending approval',
'approved','disapproved','canceled'"""
"""Valid inputs for status are, 'pending approval','approved','disapproved','canceled'"""
)

else:
5 changes: 1 addition & 4 deletions parsons/notifications/sendmail.py
@@ -193,10 +193,7 @@ def _validate_email_string(self, str):
self.log.debug(f"Validating email {str}...")
realname, email_addr = parseaddr(str)

if not email_addr:
raise ValueError("Invalid email address.")

if not validate_email(email_addr):
if not email_addr or not validate_email(email_addr):
raise ValueError("Invalid email address.")

return True
2 changes: 1 addition & 1 deletion parsons/targetsmart/targetsmart_api.py
@@ -288,7 +288,7 @@ def district(
raise ValueError("Search type 'address' requires 'address' argument")

elif search_type not in ["zip", "point", "address"]:
raise KeyError("Invalid 'search_type' provided. ")
raise KeyError("Invalid 'search_type' provided.")

else:
pass
3 changes: 1 addition & 2 deletions parsons/targetsmart/targetsmart_automation.py
@@ -212,8 +212,7 @@ def config_status(self, job_name):
logger.info(f"Match job {job_name} configuration error.")
# To Do: Lift up the configuration error.
raise ValueError(
"Job configuration failed. If you provided an email"
"address, you will be sent more details."
"Job configuration failed. If you provided an email address, you will be sent more details."
)

else:
1 change: 1 addition & 0 deletions pyproject.toml
@@ -55,6 +55,7 @@ select = [
"TC", # flake8-type-checking (TC)
"YTT", # flake8-2020 (YTT)
"ISC", # flake8-implicit-str-concat (ISC)
"PT", # flake8-pytest-style (PT)
]
ignore = [
"E501", # line-too-long (E501)
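Adding PT to the select list turns on the whole flake8-pytest-style family; the rule codes in the commit messages above map to concrete rewrites. A hedged sketch (hypothetical tests, ruff defaults) of two rules whose diffs are not shown in this excerpt:

import pytest

def test_composite_assertion_split():
    response = {"status": "ok", "rows": 3}
    # PT018 flags: assert response["status"] == "ok" and response["rows"] == 3
    # Splitting reports precisely which half failed:
    assert response["status"] == "ok"
    assert response["rows"] == 3

def test_fail_instead_of_always_false_assert():
    branch = "expected"
    # PT015 flags a bare `assert False, "..."`; pytest.fail() states the intent:
    if branch != "expected":
        pytest.fail(f"unexpected branch: {branch}")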
5 changes: 3 additions & 2 deletions test/test_actblue/test_actblue.py
@@ -1,6 +1,7 @@
import unittest
from unittest.mock import MagicMock

import pytest
import requests_mock

from parsons import ActBlue, Table
@@ -87,7 +88,7 @@ def test_error_on_complete_without_download_url(self, m):

m.get(f"{TEST_URI}/csvs/{TEST_ID}", json=mocked_get_response_no_url)

with self.assertRaises(ValueError):
with pytest.raises(ValueError, match="CSV generation failed"):
self.ab.get_download_url(csv_id=TEST_ID)

@requests_mock.Mocker()
@@ -100,7 +101,7 @@ def test_error_on_unexpected_status(self, m):

m.get(f"{TEST_URI}/csvs/{TEST_ID}", json=mocked_get_response_no_url)

with self.assertRaises(ValueError):
with pytest.raises(ValueError, match="CSV generation failed"):
self.ab.get_download_url(csv_id=TEST_ID)

@requests_mock.Mocker()
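Two things happen in the hunks above: self.assertRaises becomes pytest.raises (PT027), and a match= pattern is added, since with ruff's default PT011 settings a bare pytest.raises(ValueError) is treated as too broad. match= is applied with re.search against str(exception), so a distinctive substring is enough, but regex metacharacters need escaping. A hypothetical, self-contained example:

import pytest

def test_match_is_a_regex_search():
    with pytest.raises(ValueError, match="generation failed"):
        raise ValueError("CSV generation failed for id 123")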
1 change: 1 addition & 0 deletions test/test_action_builder/test_action_builder.py
@@ -473,6 +473,7 @@ def test_upsert_connection_missing_identifiers(self, m):
self.bldr.upsert_connection(self.fake_entity_id)
with pytest.raises(ValueError, match="Must provide exactly two identifiers"):
self.bldr.upsert_connection([self.fake_entity_id])
with pytest.raises(ValueError, match="Must provide exactly two identifiers"):
self.bldr.upsert_connection(
[self.fake_entity_id, "fake-entity-id-2", "fake-entity-id-3"]
)
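The extra with block above gives each raising call its own pytest.raises context, which is what PT012 (pytest-raises-with-multiple-statements) asks for: once the first statement inside the block raises, anything after it is silently skipped. A hypothetical illustration:

import pytest

# Flagged by PT012: if the first call raises, the second is never exercised.
def test_too_much_in_one_block():
    with pytest.raises(ValueError):
        int("not a number")
        int("also not a number")  # unreachable once the line above raises

# Preferred: one raising statement per block.
def test_one_statement_per_block():
    with pytest.raises(ValueError):
        int("not a number")
    with pytest.raises(ValueError):
        int("also not a number")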
58 changes: 28 additions & 30 deletions test/test_action_kit.py
@@ -26,20 +26,20 @@ def tearDown(self):
@mock.patch.dict(os.environ, ENV_PARAMETERS)
def test_from_environ(self):
actionkit = ActionKit()
self.assertEqual(actionkit.domain, "env_domain")
self.assertEqual(actionkit.username, "env_username")
self.assertEqual(actionkit.password, "env_password")
assert actionkit.domain == "env_domain"
assert actionkit.username == "env_username"
assert actionkit.password == "env_password"

def test_base_endpoint(self):
# Test the endpoint
url = self.actionkit._base_endpoint("user")
self.assertEqual(url, "https://domain.actionkit.com/rest/v1/user/")
assert url == "https://domain.actionkit.com/rest/v1/user/"

url = self.actionkit._base_endpoint("user", 1234)
self.assertEqual(url, "https://domain.actionkit.com/rest/v1/user/1234/")
assert url == "https://domain.actionkit.com/rest/v1/user/1234/"

url = self.actionkit._base_endpoint("user", "1234")
self.assertEqual(url, "https://domain.actionkit.com/rest/v1/user/1234/")
assert url == "https://domain.actionkit.com/rest/v1/user/1234/"

def test_delete_actionfield(self):
# Test delete actionfield
@@ -495,7 +495,7 @@ def test_paginated_get(self):
resp_mock.get.side_effect = [first_mock, second_mock]
self.actionkit.conn = resp_mock
results = self.actionkit.paginated_get("user", 150, order_by="created_at")
self.assertEqual(results.num_rows, 150)
assert results.num_rows == 150
calls = [
unittest.mock.call(
"https://domain.actionkit.com/rest/v1/user/",
@@ -523,9 +523,9 @@ def test_paginated_get_custom_limit(self):
resp_mock.get.side_effect = [first_mock, second_mock]
self.actionkit.conn = resp_mock
results = self.actionkit.paginated_get_custom_limit("user", 150, "value", 102)
self.assertEqual(results.num_rows, 102)
self.assertEqual(results.column_data("value")[0], 0)
self.assertEqual(results.column_data("value")[-1], 101)
assert results.num_rows == 102
assert results.column_data("value")[0] == 0
assert results.column_data("value")[-1] == 101
calls = [
unittest.mock.call(
"https://domain.actionkit.com/rest/v1/user/",
@@ -785,16 +785,16 @@ def test_bulk_upload_table(self):
),
"fake_page",
)
self.assertEqual(resp_mock.post.call_count, 2)
assert resp_mock.post.call_count == 2
name, args, kwargs = resp_mock.method_calls[1]
self.assertEqual(
kwargs["data"],
{"page": "fake_page", "autocreate_user_fields": 0, "user_fields_only": 0},
)
assert kwargs["data"] == {
"page": "fake_page",
"autocreate_user_fields": 0,
"user_fields_only": 0,
}
upload_data = kwargs["files"]["upload"].read()
self.assertEqual(
upload_data.decode(),
"user_id,user_customfield1,action_foo\r\n5,yes,123 Main St\r\n",
assert (
upload_data.decode() == "user_id,user_customfield1,action_foo\r\n5,yes,123 Main St\r\n"
)

def test_bulk_upload_table_userfields(self):
@@ -804,32 +804,30 @@ def test_bulk_upload_table_userfields(self):
self.actionkit.bulk_upload_table(
Table([("user_id", "user_customfield1"), (5, "yes")]), "fake_page"
)
self.assertEqual(resp_mock.post.call_count, 2)
assert resp_mock.post.call_count == 2
name, args, kwargs = resp_mock.method_calls[1]
self.assertEqual(
kwargs["data"],
{"page": "fake_page", "autocreate_user_fields": 0, "user_fields_only": 1},
)
self.assertEqual(
kwargs["files"]["upload"].read().decode(),
"user_id,user_customfield1\r\n5,yes\r\n",
)
assert kwargs["data"] == {
"page": "fake_page",
"autocreate_user_fields": 0,
"user_fields_only": 1,
}
assert kwargs["files"]["upload"].read().decode() == "user_id,user_customfield1\r\n5,yes\r\n"

def test_table_split(self):
test1 = Table([("x", "y", "z"), ("a", "b", ""), ("1", "", "3"), ("4", "", "6")])
tables = self.actionkit._split_tables_no_empties(test1, True, [])
self.assertEqual(len(tables), 2)
assert len(tables) == 2
assert_matching_tables(tables[0], Table([("x", "y"), ("a", "b")]))
assert_matching_tables(tables[1], Table([("x", "z"), ("1", "3"), ("4", "6")]))

test2 = Table([("x", "y", "z"), ("a", "b", "c"), ("1", "2", "3"), ("4", "5", "6")])
tables2 = self.actionkit._split_tables_no_empties(test2, True, [])
self.assertEqual(len(tables2), 1)
assert len(tables2) == 1
assert_matching_tables(tables2[0], test2)

test3 = Table([("x", "y", "z"), ("a", "b", ""), ("1", "2", "3"), ("4", "5", "6")])
tables3 = self.actionkit._split_tables_no_empties(test3, False, ["z"])
self.assertEqual(len(tables3), 2)
assert len(tables3) == 2
assert_matching_tables(tables3[0], Table([("x", "y"), ("a", "b")]))
assert_matching_tables(
tables3[1], Table([("x", "y", "z"), ("1", "2", "3"), ("4", "5", "6")])
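The parametrize-related commits (PT006, PT007, PT014) do not appear in this excerpt, but the shape they normalize toward is worth noting: with ruff's defaults, argument names are a tuple and the value rows are a list of tuples, with duplicate rows flagged. A hypothetical, self-contained sketch:

import re
import pytest

def looks_like_iso_date(value: str) -> bool:
    # Hypothetical helper, only here to keep the example runnable.
    return re.fullmatch(r"\d{4}-\d{2}-\d{2}", value) is not None

@pytest.mark.parametrize(
    ("value", "expected"),  # PT006 (ruff default): tuple of names rather than a list
    [                       # PT007 (ruff default): list of rows, each row a tuple
        ("2024-01-02", True),
        ("not-a-date", False),  # PT014 would flag this row if it were duplicated
    ],
)
def test_looks_like_iso_date(value, expected):
    assert looks_like_iso_date(value) is expected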