
test: cleanup and lint for unused arguments in test functions #11145


Merged: 5 commits, Jun 1, 2025

This PR enables the unused-argument lint (Ruff's ARG rules) for test code and cleans up the existing violations: unused parameters are renamed to underscore forms (_, _x, *_, **__), unused pytest fixtures are dropped from test signatures, and parameters that the shared test harness requires but a given backend ignores are suppressed with # noqa: ARG004.
2 changes: 1 addition & 1 deletion ibis/backends/athena/tests/conftest.py
@@ -101,7 +101,7 @@
self.connection = self.connect(schema_name=IBIS_ATHENA_TEST_DATABASE, **kw)

@staticmethod
-def connect(*, tmpdir, worker_id, **kw) -> BaseBackend:
+def connect(*, tmpdir, worker_id, **kw) -> BaseBackend: # noqa: ARG004

return ibis.athena.connect(**CONNECT_ARGS, **kw)

def _remap_column_names(self, table_name: str) -> dict[str, str]:
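Ruff's ARG004 rule ("unused static method argument") fires on connect because the shared test harness calls every backend's connect with the same keyword set, including tmpdir and worker_id, whether or not a given backend needs them. A minimal sketch of the pattern (the class and stub return value below are hypothetical, not from the PR):

    class ExampleConf:
        @staticmethod
        def connect(*, tmpdir, worker_id, **kw):  # noqa: ARG004
            # tmpdir and worker_id are required by the harness's calling
            # convention but unused here, so the lint is suppressed rather
            # than the signature narrowed.
            return {"backend": "example", **kw}


    # The harness always supplies the full keyword set:
    conn = ExampleConf.connect(tmpdir="/tmp/pytest-0", worker_id="gw0", url="db://local")
    assert conn["url"] == "db://local"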
2 changes: 1 addition & 1 deletion ibis/backends/bigquery/tests/conftest.py
@@ -64,7 +64,7 @@ def _load_data(self, **_: Any) -> None:
query.result()

@staticmethod
-def connect(*, tmpdir, worker_id, **kw) -> Backend:
+def connect(*, tmpdir, worker_id, **kw) -> Backend: # noqa: ARG004
"""Connect to the test project and dataset."""
credentials, default_project_id = google.auth.default(
scopes=EXTERNAL_DATA_SCOPES
2 changes: 1 addition & 1 deletion ibis/backends/bigquery/tests/system/test_connect.py
@@ -30,7 +30,7 @@ def test_repeated_project_name(project_id, credentials):
def test_project_id_different_from_default_credentials(monkeypatch):
creds = mock.create_autospec(auth.Credentials)

-def mock_credentials(*args, **kwargs):
+def mock_credentials(*_, **__):
return creds, "default-project-id"

monkeypatch.setattr(pydata_google_auth, "default", mock_credentials)
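Where a stub must accept an arbitrary call signature but use none of it, the catch-alls are renamed *_ and **__: Ruff's dummy-variable convention treats underscore-only names as intentionally unused. A self-contained sketch (the fake auth module below is a stand-in, not pydata_google_auth):

    import types


    def fake_default(*_, **__):
        # Accepts whatever the caller passes; uses none of it.
        return ("fake-credentials", "default-project-id")


    fake_auth = types.SimpleNamespace(default=fake_default)
    assert fake_auth.default("any", scopes=["ignored"]) == (
        "fake-credentials",
        "default-project-id",
    )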
@@ -45,7 +45,7 @@ def my_add(a: float, b: float) -> float:
)


-def test_udf_with_struct(alltypes, df, snapshot):
+def test_udf_with_struct(alltypes, df):
@udf.scalar.python
def my_struct_thing(a: float, b: float) -> dt.Struct(
{"width": float, "height": float}
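pytest resolves fixtures by parameter name, so a fixture listed in a test's signature is built for every run even if the body never touches it; dropping the unused snapshot argument avoids that setup work and satisfies the unused-argument lint. A minimal sketch of the mechanism (hypothetical fixture and test, run under pytest):

    import pytest


    @pytest.fixture
    def df():
        return {"a": [1.0], "b": [2.0]}


    def test_requests_only_what_it_uses(df):
        # Only df is constructed; an unused snapshot parameter
        # would still have triggered its fixture's setup.
        assert df["a"][0] + df["b"][0] == 3.0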
@@ -1,5 +1,5 @@
function f() {
-class Foo {
+class _ {
do_stuff() {
while (true) {
let i = 1;
@@ -1,4 +1,4 @@
-function f(a) {
+function f(_) {
let x = (new Set());
let y = 1;
x.add(y);
@@ -1,4 +1,4 @@
-function f(a) {
+function f(_) {
let x = {};
let y = '2';
x[y] = y;
@@ -1,3 +1,3 @@
-function* f(a) {
+function* f(_) {
yield* [1, 2, 3];
}
8 changes: 4 additions & 4 deletions ibis/backends/bigquery/tests/unit/udf/test_core.py
@@ -40,7 +40,7 @@ def f():


def test_yield(snapshot):
-def f(a):
+def f(_):
yield from [1, 2, 3]

js = compile(f)
@@ -235,7 +235,7 @@ def test_class_with_properties(snapshot):


def test_set_to_object(snapshot):
-def f(a):
+def f(_):
x = set()
y = 1
x.add(y)
@@ -246,7 +246,7 @@ def f(a):


def test_setitem(snapshot):
-def f(a):
+def f(_):
x = {}
y = "2"
x[y] = y
@@ -271,7 +271,7 @@ def f(a):

def test_scope_with_while(snapshot):
def f():
-class Foo:
+class _:
def do_stuff(self):
while True:
i = 1
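The .js files above are stored snapshots of what the BigQuery UDF compiler emits for these Python test functions, which is why renaming a to _ (and Foo to _) in the Python sources regenerates them: parameter and class names flow from the parsed Python source straight into the generated JavaScript. A toy illustration of that name flow using only the standard library (this is not ibis's translator):

    import ast
    import inspect


    def f(_):
        yield from [1, 2, 3]


    # A source-to-source translator reads names out of the AST, so the
    # Python-side rename is exactly what renames the JS parameter.
    tree = ast.parse(inspect.getsource(f))
    print([arg.arg for arg in tree.body[0].args.args])  # ['_']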
2 changes: 1 addition & 1 deletion ibis/backends/bigquery/tests/unit/udf/test_usage.py
@@ -66,7 +66,7 @@ def my_len(s: str) -> float:
def test_udf_int64(argument_type, return_type):
# invalid argument type, valid return type
@udf.scalar.python(signature=((argument_type,), return_type))
-def my_func(x):
+def my_func(_x):
return 1

expr = my_func(None)
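An underscore prefix (_x) satisfies the unused-argument rules just like a bare _, while keeping a readable name; that suits parameters the declared UDF signature requires but the body ignores. A minimal sketch:

    def my_func(_x):
        # Required by the declared signature, intentionally unused;
        # the leading underscore silences the lint.
        return 1


    assert my_func(None) == 1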
6 changes: 5 additions & 1 deletion ibis/backends/clickhouse/tests/conftest.py
@@ -79,7 +79,7 @@ def postload(self, **kw: Any):

@staticmethod
def connect(
-*, tmpdir, worker_id, settings: Mapping[str, Any] | None = None, **kw: Any
+*,
+tmpdir, # noqa: ARG004
+worker_id, # noqa: ARG004
+settings: Mapping[str, Any] | None = None,
+**kw: Any,
):
if settings is None:
settings = {}
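Ruff applies a # noqa directive only to the physical line it sits on, so once the signature is wrapped one parameter per line, each unused argument carries its own targeted suppression instead of one comment covering the whole def. A minimal runnable sketch of the shape (the class and return value are hypothetical):

    from typing import Any


    class Conf:
        @staticmethod
        def connect(
            *,
            tmpdir,  # noqa: ARG004
            worker_id,  # noqa: ARG004
            settings: dict[str, Any] | None = None,
            **kw: Any,
        ):
            return {"settings": settings or {}, **kw}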
44 changes: 22 additions & 22 deletions ibis/backends/clickhouse/tests/test_aggregations.py
@@ -45,48 +45,48 @@ def test_reduction_invalid_where(alltypes, reduction):
("func", "pandas_func"),
[
(
-lambda t, cond: t.bool_col.count(),
-lambda df, cond: df.bool_col.count(),
+lambda t, _: t.bool_col.count(),
+lambda df, _: df.bool_col.count(),
),
(
-lambda t, cond: t.bool_col.approx_nunique(),
-lambda df, cond: df.bool_col.nunique(),
+lambda t, _: t.bool_col.approx_nunique(),
+lambda df, _: df.bool_col.nunique(),
),
(
-lambda t, cond: t.double_col.sum(),
-lambda df, cond: df.double_col.sum(),
+lambda t, _: t.double_col.sum(),
+lambda df, _: df.double_col.sum(),
),
(
-lambda t, cond: t.double_col.mean(),
-lambda df, cond: df.double_col.mean(),
+lambda t, _: t.double_col.mean(),
+lambda df, _: df.double_col.mean(),
),
(
-lambda t, cond: t.int_col.approx_median(),
-lambda df, cond: df.int_col.median(),
+lambda t, _: t.int_col.approx_median(),
+lambda df, _: df.int_col.median(),
),
(
-lambda t, cond: t.double_col.min(),
-lambda df, cond: df.double_col.min(),
+lambda t, _: t.double_col.min(),
+lambda df, _: df.double_col.min(),
),
(
-lambda t, cond: t.double_col.max(),
-lambda df, cond: df.double_col.max(),
+lambda t, _: t.double_col.max(),
+lambda df, _: df.double_col.max(),
),
(
-lambda t, cond: t.double_col.var(),
-lambda df, cond: df.double_col.var(),
+lambda t, _: t.double_col.var(),
+lambda df, _: df.double_col.var(),
),
(
-lambda t, cond: t.double_col.std(),
-lambda df, cond: df.double_col.std(),
+lambda t, _: t.double_col.std(),
+lambda df, _: df.double_col.std(),
),
(
-lambda t, cond: t.double_col.var(how="sample"),
-lambda df, cond: df.double_col.var(ddof=1),
+lambda t, _: t.double_col.var(how="sample"),
+lambda df, _: df.double_col.var(ddof=1),
),
(
-lambda t, cond: t.double_col.std(how="pop"),
-lambda df, cond: df.double_col.std(ddof=0),
+lambda t, _: t.double_col.std(how="pop"),
+lambda df, _: df.double_col.std(ddof=0),
),
(
lambda t, cond: t.bool_col.count(where=cond),
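Every (func, pandas_func) pair in this parametrization takes (table, condition) so all cases share one calling convention; pairs that ignore the condition rename it _, while cases like the where=cond pair just above keep the name. A toy of the same convention in plain pandas (hypothetical data, not the suite's):

    import pandas as pd

    pairs = [
        (lambda t, _: t["x"].sum(), lambda df, _: df["x"].sum()),  # ignores the condition
        (lambda t, cond: t["x"][cond].sum(), lambda df, cond: df["x"][cond].sum()),  # uses it
    ]

    df = pd.DataFrame({"x": [1, 2, 3]})
    cond = df["x"] > 1
    for func, pandas_func in pairs:
        assert func(df, cond) == pandas_func(df, cond)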
4 changes: 2 additions & 2 deletions ibis/backends/clickhouse/tests/test_client.py
@@ -380,13 +380,13 @@ def test_password_with_bracket():
ibis.clickhouse.connect(host=host, user=user, port=port, password=quoted_pass)


-def test_from_url(con):
+def test_from_url():
assert ibis.connect(
f"clickhouse://{CLICKHOUSE_USER}:{CLICKHOUSE_PASS}@{CLICKHOUSE_HOST}:{CLICKHOUSE_PORT}/{IBIS_TEST_CLICKHOUSE_DB}"
)


-def test_from_url_with_kwargs(con):
+def test_from_url_with_kwargs():
# since explicit kwargs take precedence, this passes, because we're passing
# `database` explicitly, even though our connection string says to use a
# random database
4 changes: 2 additions & 2 deletions ibis/backends/clickhouse/tests/test_functions.py
@@ -78,13 +78,13 @@ def test_timestamp_cast(alltypes, assert_sql):
assert_sql(result2, "out2.sql")


-def test_timestamp_now(con, assert_sql):
+def test_timestamp_now(assert_sql):
expr = ibis.now()
assert_sql(expr)


@pytest.mark.parametrize("unit", ["y", "m", "d", "w", "h", "minute"])
-def test_timestamp_truncate(con, unit, assert_sql):
+def test_timestamp_truncate(unit, assert_sql):
stamp = ibis.timestamp("2009-05-17 12:34:56")
expr = stamp.truncate(unit)
assert_sql(expr)
2 changes: 1 addition & 1 deletion ibis/backends/databricks/tests/conftest.py
@@ -66,7 +66,7 @@
cur.execute(stmt)

@staticmethod
-def connect(*, tmpdir, worker_id, **kw) -> BaseBackend:
+def connect(*, tmpdir, worker_id, **kw) -> BaseBackend: # noqa: ARG004

return ibis.databricks.connect(
server_hostname=env["DATABRICKS_SERVER_HOSTNAME"],
http_path=env["DATABRICKS_HTTP_PATH"],
2 changes: 1 addition & 1 deletion ibis/backends/datafusion/tests/conftest.py
@@ -34,7 +34,7 @@ def _load_data(self, **_: Any) -> None:
con.create_table("topk", topk)

@staticmethod
-def connect(*, tmpdir, worker_id, **kw):
+def connect(*, tmpdir, worker_id, **kw): # noqa: ARG004
return ibis.datafusion.connect(**kw)

def _load_tpc(self, *, suite, scale_factor):
4 changes: 2 additions & 2 deletions ibis/backends/datafusion/tests/test_udf.py
@@ -14,12 +14,12 @@
with pytest.warns(FutureWarning, match="v9.0"):

@elementwise(input_type=["string"], output_type="int64")
-def my_string_length(arr, **kwargs):
+def my_string_length(arr, **_):
# arr is a pyarrow.StringArray
return pc.cast(pc.multiply(pc.utf8_length(arr), 2), target_type="int64")

@elementwise(input_type=[dt.int64, dt.int64], output_type=dt.int64)
-def my_add(arr1, arr2, **kwargs):
+def my_add(arr1, arr2, **_):
return pc.add(arr1, arr2)

@reduction(input_type=[dt.float64], output_type=dt.float64)
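The wrapped function keeps a catch-all for keyword arguments the decorator may pass; spelling it **_ rather than **kwargs records that the extras are deliberately ignored. A standalone sketch of the wrapped logic with pyarrow (decorator omitted):

    import pyarrow as pa
    import pyarrow.compute as pc


    def my_string_length(arr, **_):
        # Extra keyword context from the UDF machinery is accepted, unused.
        return pc.cast(pc.multiply(pc.utf8_length(arr), 2), target_type="int64")


    print(my_string_length(pa.array(["ab", "xyz"])))  # values 4 and 6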
2 changes: 1 addition & 1 deletion ibis/backends/druid/tests/conftest.py
@@ -145,5 +145,5 @@ def _load_data(self, **_: Any) -> None:
fut.result()

@staticmethod
-def connect(*, tmpdir, worker_id, **kw):
+def connect(*, tmpdir, worker_id, **kw): # noqa: ARG004
return ibis.connect(DRUID_URL, **kw)
8 changes: 4 additions & 4 deletions ibis/backends/duckdb/tests/conftest.py
@@ -97,7 +97,7 @@ def ddl_script(self) -> Iterator[str]:
yield from super().ddl_script

@staticmethod
-def connect(*, tmpdir, worker_id, **kw) -> BaseBackend:
+def connect(*, tmpdir, worker_id, **kw) -> BaseBackend: # noqa: ARG004
# use an extension directory per test worker to prevent simultaneous
# downloads on windows
#
@@ -150,12 +150,12 @@ def gpd():


@pytest.fixture(scope="session")
-def zones(con, data_dir, gpd):
+def zones(con, data_dir):
return con.read_geo(data_dir / "geojson" / "zones.geojson")


@pytest.fixture(scope="session")
-def lines(con, data_dir, gpd):
+def lines(con, data_dir):
return con.read_geo(data_dir / "geojson" / "lines.geojson")


@@ -170,7 +170,7 @@ def lines_gdf(data_dir, gpd):


@pytest.fixture(scope="session")
-def geotable(con, gpd):
+def geotable(con):
return con.table("geo")


4 changes: 2 additions & 2 deletions ibis/backends/duckdb/tests/test_datatypes.py
@@ -122,7 +122,7 @@ def test_cast_to_floating_point_type(con, snapshot, typ):
snapshot.assert_match(sql, "out.sql")


-def null_literal(array_min_size: int = 0, array_max_size: int | None = None):
+def null_literal():
true = st.just(True)

field_names = st.text(alphabet=string.ascii_lowercase + string.digits, min_size=1)
@@ -162,7 +162,7 @@ def null_literal(array_min_size: int = 0, array_max_size: int | None = None):
)


-@h.given(lit=null_literal(array_min_size=1, array_max_size=8192))
+@h.given(lit=null_literal())
@h.settings(suppress_health_check=[h.HealthCheck.function_scoped_fixture])
def test_null_scalar(con, lit, monkeypatch):
monkeypatch.setattr(ibis.options, "default_backend", con)
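Dropping null_literal's unused size parameters also requires updating its call site in @h.given in the same change; a signature cleanup is complete only when every caller matches. The same pattern in a tiny self-contained hypothesis test (toy strategy, not the suite's):

    import hypothesis as h
    import hypothesis.strategies as st


    def null_literal():
        # The factory takes no arguments anymore, so callers pass none.
        return st.just(None)


    @h.given(lit=null_literal())
    def test_null(lit):
        assert lit is None


    test_null()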
2 changes: 1 addition & 1 deletion ibis/backends/duckdb/tests/test_decompile_tpch.py
@@ -73,7 +73,7 @@ def set_database(con, db):
param(3, id="tpch03"),
],
)
-def test_parse_sql_tpch(tpch_query, snapshot, con, data_dir):
+def test_parse_sql_tpch(tpch_query, snapshot, con):
tpch_query_file = SQL_QUERY_PATH / f"{tpch_query:02d}.sql"
with open(tpch_query_file) as f:
sql = f.read()
2 changes: 1 addition & 1 deletion ibis/backends/duckdb/tests/test_geospatial.py
@@ -262,7 +262,7 @@ def test_create_table_geospatial_types(geotable, con):


@pytest.mark.parametrize("expr", [point, point_geom])
-def test_literal_geospatial_explicit(con, expr, assert_sql):
+def test_literal_geospatial_explicit(expr, assert_sql):
assert_sql(expr)


2 changes: 1 addition & 1 deletion ibis/backends/exasol/tests/conftest.py
@@ -39,7 +39,7 @@ class TestConf(ServiceBackendTest):
deps = ("pyexasol",)

@staticmethod
-def connect(*, tmpdir, worker_id, **kw: Any):
+def connect(*, tmpdir, worker_id, **kw: Any): # noqa: ARG004
return ibis.exasol.connect(
user=EXASOL_USER,
password=EXASOL_PASS,
4 changes: 2 additions & 2 deletions ibis/backends/flink/tests/conftest.py
@@ -89,7 +89,7 @@ class TestConf(BackendTest):
deps = "pandas", "pyflink"

@staticmethod
-def connect(*, tmpdir, worker_id, **kw: Any):
+def connect(*, tmpdir, worker_id, **kw: Any): # noqa: ARG004
"""Flink backend is created in batch mode by default. This is to
comply with the assumption that the tests under ibis/ibis/backends/tests/
are for batch (storage or processing) backends.
@@ -125,7 +125,7 @@ def _load_data(self, **_: Any) -> None:

class TestConfForStreaming(TestConf):
@staticmethod
-def connect(*, tmpdir, worker_id, **kw: Any):
+def connect(*, tmpdir, worker_id, **kw: Any): # noqa: ARG004
"""Flink backend is created in streaming mode here. To be used
in the tests under ibis/ibis/backends/flink/tests/.
We only use mini cluster here for simplicity.
2 changes: 1 addition & 1 deletion ibis/backends/impala/tests/conftest.py
@@ -124,7 +124,7 @@ def postload(self, **kw):
self.connection = self.connect(database=env.test_data_db, **kw)

@staticmethod
-def connect(*, tmpdir, worker_id, **kw):
+def connect(*, tmpdir, worker_id, **kw): # noqa: ARG004
env = IbisTestEnv()
return ibis.impala.connect(host=env.impala_host, port=env.impala_port, **kw)

2 changes: 1 addition & 1 deletion ibis/backends/mssql/tests/conftest.py
@@ -47,7 +47,7 @@ def _load_data(self, *, database: str = IBIS_TEST_MSSQL_DB, **_):
super()._load_data(database=database, **_)

@staticmethod
-def connect(*, tmpdir, worker_id, **kw):
+def connect(*, tmpdir, worker_id, **kw): # noqa: ARG004
return ibis.mssql.connect(
host=MSSQL_HOST,
user=MSSQL_USER,
@@ -0,0 +1,8 @@
SELECT
[t0].[color],
[t0].[price],
RANK() OVER (
PARTITION BY [t0].[color]
ORDER BY CASE WHEN [t0].[price] IS NULL THEN 1 ELSE 0 END, [t0].[price] ASC
) - 1 AS [MinRank()]
FROM [diamonds_sample] AS [t0]
8 changes: 8 additions & 0 deletions ibis/backends/mssql/tests/test_client.py
@@ -347,3 +347,11 @@ def test_mssql_without_password_is_valid():
)
def test_list_tables_with_dash(con, database):
assert con.list_tables(database=database)


def test_rank_no_window_frame(snapshot):
t = ibis.table(schema=dict(color=str, price=int), name="diamonds_sample")
expr = t.mutate(ibis.rank().over(group_by="color", order_by="price"))
sql = ibis.to_sql(expr, dialect="mssql")

snapshot.assert_match(sql, "out.sql")
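The snapshot above pins this behavior: ibis.rank() is 0-based while SQL's RANK() is 1-based, hence the trailing - 1, and the CASE WHEN ... IS NULL expression emulates explicit null ordering, which T-SQL's ORDER BY does not support directly. The test body doubles as a standalone reproduction (assuming an ibis installation with the MSSQL dialect):

    import ibis

    t = ibis.table(schema=dict(color=str, price=int), name="diamonds_sample")
    expr = t.mutate(ibis.rank().over(group_by="color", order_by="price"))
    # Prints the SQL captured in out.sql above.
    print(ibis.to_sql(expr, dialect="mssql"))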