@@ -157,7 +157,7 @@ def reset_table() -> None:
             .whenNotMatchedBySourceUpdate(set={"value": "value + 0"}) \
             .execute()
         self.__checkAnswer(merge_output,
-                           ([Row(6,  # affected rows
+                           ([Row(6,  # type: ignore[call-overload]
                                  4,  # updated rows (a and b in WHEN MATCHED
                                      # and c and d in WHEN NOT MATCHED BY SOURCE)
                                  0,  # deleted rows
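The `call-overload` ignore above is needed because pyspark's type stubs annotate the positional form of `Row` as `*args: str` (field names), so building a `Row` from integer values type-checks as an overload mismatch even though it is fine at runtime. A minimal sketch of the mismatch (variable names are illustrative):

```python
from pyspark.sql import Row

# Runtime-valid: positional values build an unnamed Row,
# but mypy rejects non-str positional args against the stubs.
counts = Row(6, 4, 0)  # type: ignore[call-overload]

# Keyword construction satisfies the stubs without an ignore.
named = Row(affected=6, updated=4, deleted=0)
```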
@@ -540,7 +540,7 @@ def test_history(self) -> None:
             [Row("Overwrite")],
             StructType([StructField("operationParameters.mode", StringType(), True)]))

-    def test_cdc(self):
+    def test_cdc(self) -> None:
         self.spark.range(0, 5).write.format("delta").save(self.tempFile)
         deltaTable = DeltaTable.forPath(self.spark, self.tempFile)
         # Enable Change Data Feed
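`test_cdc` exercises Delta's Change Data Feed. For orientation, a minimal sketch of the CDF round trip such a test performs, assuming a local `delta-spark` install; the path and version numbers are illustrative, and `readChangeFeed` is the documented reader option (older releases also accept `readChangeData`):

```python
from delta import configure_spark_with_delta_pip
from pyspark.sql import SparkSession

builder = (SparkSession.builder.appName("cdf-sketch")
           .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension")
           .config("spark.sql.catalog.spark_catalog",
                   "org.apache.spark.sql.delta.catalog.DeltaCatalog"))
spark = configure_spark_with_delta_pip(builder).getOrCreate()

path = "/tmp/cdc-sketch"
spark.range(0, 5).write.format("delta").save(path)                  # version 0

# Enable Change Data Feed on the existing table (what the test does next).
spark.sql(f"ALTER TABLE delta.`{path}` "
          "SET TBLPROPERTIES (delta.enableChangeDataFeed = true)")  # version 1

spark.range(5, 10).write.format("delta").mode("append").save(path)  # version 2

# Per-row changes recorded since CDF was enabled.
(spark.read.format("delta")
    .option("readChangeFeed", "true")
    .option("startingVersion", 1)
    .load(path)
    .select("id", "_change_type", "_commit_version")
    .show())
```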
@@ -971,10 +971,10 @@ def test_replace_table_behavior(self) -> None:

     def test_verify_paritionedBy_compatibility(self) -> None:
         try:
-            from pyspark.sql.column import _to_seq  # type: ignore[import-not-found]
+            from pyspark.sql.column import _to_seq  # type: ignore[attr-defined]
         except ImportError:
             # Spark 4
-            from pyspark.sql.classic.column import _to_seq  # type: ignore
+            from pyspark.sql.classic.column import _to_seq  # type: ignore[attr-defined]

         with self.table("testTable"):
             tableBuilder = DeltaTable.create(self.spark).tableName("testTable") \
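On the last hunk: mypy reports `import-not-found` when it cannot resolve a module at all, and `attr-defined` when the module resolves but does not export the imported name, so the right code depends on which pyspark the type checker sees; narrowing a bare `# type: ignore` to a specific code also keeps `warn_unused_ignores` useful. A hedged sketch of the same Spark 3 / Spark 4 fallback, wrapped in a helper whose name is invented for illustration:

```python
from typing import Any, Callable


def _resolve_to_seq() -> Callable[..., Any]:
    """Locate pyspark's private _to_seq across Spark 3 and Spark 4 layouts.

    Spark 4 moved the classic Column internals under pyspark.sql.classic,
    so the Spark 3 import path raises ImportError there.
    """
    try:
        from pyspark.sql.column import _to_seq  # type: ignore[attr-defined]
    except ImportError:
        # Spark 4 layout
        from pyspark.sql.classic.column import _to_seq  # type: ignore[attr-defined]
    return _to_seq
```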