Commit 90f8e05

Changed quote style in tests to use double quotes at the outer level and backslash-escaped quotes at the inner level.
The remaining changes are fixes required by ruff.
1 parent 3f89d84 commit 90f8e05

21 files changed: +220 -86 lines
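
The test files themselves are not expanded in this view, so the quote change can only be sketched. A minimal hypothetical example of the convention the message describes (not a line from this commit):

    # Before: single quotes at the outer level avoid escaping the inner quotes
    assert str(err) == 'First argument must be a "ktensor".'
    # After: double quotes at the outer level, backslash-escaped quotes inside
    assert str(err) == "First argument must be a \"ktensor\"."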

conftest.py (+8 -2)

@@ -152,10 +152,16 @@ def sample_ktensor_3way():  # noqa: D103
 def sample_ktensor_symmetric():  # noqa: D103
     weights = np.array([1.0, 1.0])
     fm0 = np.array(
-        [[2.340431417384394, 4.951967353890655], [4.596069112758807, 8.012451489774961]]
+        [
+            [2.340431417384394, 4.951967353890655],
+            [4.596069112758807, 8.012451489774961],
+        ]
     )
     fm1 = np.array(
-        [[2.340431417384394, 4.951967353890655], [4.596069112758807, 8.012451489774961]]
+        [
+            [2.340431417384394, 4.951967353890655],
+            [4.596069112758807, 8.012451489774961],
+        ]
     )
     factor_matrices = [fm0, fm1]
     data = {"weights": weights, "factor_matrices": factor_matrices}

docs/source/tutorial/algorithm_gcp_opt.ipynb (+19 -9)

@@ -212,7 +212,7 @@
 ")\n",
 "\n",
 "print(\n",
-"    f\"\\nFinal fit: {1 - np.linalg.norm((X-result_lbfgs.full()).double())/X.norm()} (for comparison to f(x) in CP-ALS)\\n\"\n",
+"    f\"\\nFinal fit: {1 - np.linalg.norm((X - result_lbfgs.full()).double()) / X.norm()} (for comparison to f(x) in CP-ALS)\\n\"\n",
 ")"
 ]
 },
@@ -234,7 +234,7 @@
 "    input_tensor=X, rank=rank, maxiters=2, init=initial_guess\n",
 ")\n",
 "print(\n",
-"    f\"\\nFinal fit: {1 - np.linalg.norm((X-result_als.full()).double())/X.norm()} (for comparison to f(x) in GCP-OPT)\\n\"\n",
+"    f\"\\nFinal fit: {1 - np.linalg.norm((X - result_als.full()).double()) / X.norm()} (for comparison to f(x) in GCP-OPT)\\n\"\n",
 ")"
 ]
 },
@@ -269,7 +269,7 @@
 "    printitn=1,\n",
 ")\n",
 "print(\n",
-"    f\"\\nFinal fit: {1 - np.linalg.norm((X-result_adam.full()).double())/X.norm()} (for comparison to f(x) in GCP-OPT & CP-ALS)\\n\"\n",
+"    f\"\\nFinal fit: {1 - np.linalg.norm((X - result_adam.full()).double()) / X.norm()} (for comparison to f(x) in GCP-OPT & CP-ALS)\\n\"\n",
 ")"
 ]
 },
@@ -352,7 +352,9 @@
 "    data=X, rank=rank, objective=objective, optimizer=optimizer, printitn=1\n",
 ")\n",
 "\n",
-"print(f\"\\nFinal fit: {1 - np.linalg.norm((X-result_lbfgs.full()).double())/X.norm()}\\n\")"
+"print(\n",
+"    f\"\\nFinal fit: {1 - np.linalg.norm((X - result_lbfgs.full()).double()) / X.norm()}\\n\"\n",
+")"
 ]
 },
 {
@@ -379,7 +381,9 @@
 "    data=X, rank=rank, objective=objective, optimizer=optimizer, printitn=1\n",
 ")\n",
 "\n",
-"print(f\"\\nFinal fit: {1 - np.linalg.norm((X-result_adam.full()).double())/X.norm()}\\n\")"
+"print(\n",
+"    f\"\\nFinal fit: {1 - np.linalg.norm((X - result_adam.full()).double()) / X.norm()}\\n\"\n",
+")"
 ]
 },
 {
@@ -453,7 +457,7 @@
 "# Convert to sparse tensor, real-valued 0/1 tensor since it was constructed\n",
 "# to be sparse\n",
 "X = Xfull.to_sptensor()\n",
-"print(f\"Proportion of nonzeros in X is {100*X.nnz / np.prod(shape):.2f}%\\n\");"
+"print(f\"Proportion of nonzeros in X is {100 * X.nnz / np.prod(shape):.2f}%\\n\");"
 ]
 },
 {
@@ -499,7 +503,9 @@
 "    data=Xfull, rank=rank, objective=objective, optimizer=optimizer\n",
 ")\n",
 "\n",
-"print(f\"\\nFinal fit: {1 - np.linalg.norm((X-result_lbfgs.full()).double())/X.norm()}\\n\")"
+"print(\n",
+"    f\"\\nFinal fit: {1 - np.linalg.norm((X - result_lbfgs.full()).double()) / X.norm()}\\n\"\n",
+")"
 ]
 },
 {
@@ -527,7 +533,9 @@
 "    data=X, rank=rank, objective=objective, optimizer=optimizer, printitn=1\n",
 ")\n",
 "\n",
-"print(f\"\\nFinal fit: {1 - np.linalg.norm((X-result_adam.full()).double())/X.norm()}\\n\")"
+"print(\n",
+"    f\"\\nFinal fit: {1 - np.linalg.norm((X - result_adam.full()).double()) / X.norm()}\\n\"\n",
+")"
 ]
 },
 {
@@ -592,7 +600,9 @@
 "    data=X, rank=rank, objective=objective, optimizer=optimizer, printitn=1\n",
 ")\n",
 "\n",
-"print(f\"\\nFinal fit: {1 - np.linalg.norm((X-result_adam.full()).double())/X.norm()}\\n\")"
+"print(\n",
+"    f\"\\nFinal fit: {1 - np.linalg.norm((X - result_adam.full()).double()) / X.norm()}\\n\"\n",
+")"
 ]
 }
 ],

docs/source/tutorial/algorithm_hosvd.ipynb (+2 -2)

@@ -163,7 +163,7 @@
 "        # Verbose output\n",
 "        if verbose:\n",
 "            print(\n",
-"                f\"Created block with shape {tuple(bsz[i,:])} with norm ({block.norm()})^2 = {block.norm()**2}\"\n",
+"                f\"Created block with shape {tuple(bsz[i, :])} with norm ({block.norm()})^2 = {block.norm() ** 2}\"\n",
 "            )\n",
 "\n",
 "    if dltnrmsqr > 0:\n",
@@ -174,7 +174,7 @@
 "        G += block\n",
 "        if verbose:\n",
 "            print(\n",
-"                f\"Created tensor with shape {tuple(gsz)} with off-block-diagonal norm ({block.norm()})^2 = {block.norm()**2}\"\n",
+"                f\"Created tensor with shape {tuple(gsz)} with off-block-diagonal norm ({block.norm()})^2 = {block.norm() ** 2}\"\n",
 "            )\n",
 "\n",
 "    return G, bsz, bns"

docs/source/tutorial/class_ktensor.ipynb (+32 -8)

@@ -69,7 +69,11 @@
 "outputs": [],
 "source": [
 "Y = ttb.ktensor(\n",
-"    [np.random.rand(4, 1), np.random.rand(2, 1), np.random.rand(3, 1)]\n",
+"    [\n",
+"        np.random.rand(4, 1),\n",
+"        np.random.rand(2, 1),\n",
+"        np.random.rand(3, 1),\n",
+"    ]\n",
 ")  # Another ktensor.\n",
 "Y"
 ]
@@ -436,10 +440,18 @@
 "source": [
 "np.random.seed(0)\n",
 "X = ttb.ktensor(\n",
-"    [np.random.rand(4, 2), np.random.rand(2, 2), np.random.rand(3, 2)]\n",
+"    [\n",
+"        np.random.rand(4, 2),\n",
+"        np.random.rand(2, 2),\n",
+"        np.random.rand(3, 2),\n",
+"    ]\n",
 ")  # Data.\n",
 "Y = ttb.ktensor(\n",
-"    [np.random.rand(4, 2), np.random.rand(2, 2), np.random.rand(3, 2)]\n",
+"    [\n",
+"        np.random.rand(4, 2),\n",
+"        np.random.rand(2, 2),\n",
+"        np.random.rand(3, 2),\n",
+"    ]\n",
 ")  # More data.\n",
 "X, Y"
 ]
@@ -522,7 +534,11 @@
 "source": [
 "np.random.seed(0)\n",
 "X = ttb.ktensor(\n",
-"    [np.random.rand(4, 2), np.random.rand(2, 2), np.random.rand(3, 2)]\n",
+"    [\n",
+"        np.random.rand(4, 2),\n",
+"        np.random.rand(2, 2),\n",
+"        np.random.rand(3, 2),\n",
+"    ]\n",
 ")  # Data.\n",
 "X.permute(np.array((1, 2, 0)))  # Reorders modes of X."
 ]
@@ -543,7 +559,11 @@
 "source": [
 "np.random.seed(0)\n",
 "X = ttb.ktensor(\n",
-"    [np.random.rand(3, 2), np.random.rand(4, 2), np.random.rand(2, 2)]\n",
+"    [\n",
+"        np.random.rand(3, 2),\n",
+"        np.random.rand(4, 2),\n",
+"        np.random.rand(2, 2),\n",
+"    ]\n",
 ")  # Unit weights.\n",
 "X"
 ]
@@ -574,7 +594,11 @@
 "source": [
 "np.random.seed(0)\n",
 "X = ttb.ktensor(\n",
-"    [np.random.rand(4, 2), np.random.rand(2, 2), np.random.rand(3, 2)]\n",
+"    [\n",
+"        np.random.rand(4, 2),\n",
+"        np.random.rand(2, 2),\n",
+"        np.random.rand(3, 2),\n",
+"    ]\n",
 ")  # Data.\n",
 "Y = X\n",
 "Y.factor_matrices[0][:, 0] = -Y.factor_matrices[0][\n",
@@ -629,10 +653,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(f\"U*S*Vh:\\n{U@np.diag(S)@Vh}\")\n",
+"print(f\"U*S*Vh:\\n{U @ np.diag(S) @ Vh}\")\n",
 "print(\n",
 "    f\"\\nX.factor_matrices[0]@np.diag(X.weights)@(X.factor_matrices[1].transpose()):\\n\\\n",
-"{X.factor_matrices[0]@np.diag(X.weights)@(X.factor_matrices[1].transpose())}\"\n",
+"{X.factor_matrices[0] @ np.diag(X.weights) @ (X.factor_matrices[1].transpose())}\"\n",
 ")\n",
 "print(f\"\\nX.full():\\n{X.full()}\")  # Reassemble the original matrix."
 ]

docs/source/tutorial/class_sptenmat.ipynb (+1 -1)

@@ -363,7 +363,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(f\"Matrix shape: {A.shape}\\n\" f\"Original tensor shape: {A.tshape}\")"
+"print(f\"Matrix shape: {A.shape}\\nOriginal tensor shape: {A.tshape}\")"
 ]
 },
 {
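
This hunk, like the matching ones in class_sumtensor.ipynb and pyttb/hosvd.py below, merges two adjacent f-string literals into one. Adjacent literals on one line are implicitly concatenated, which ruff flags under its flake8-implicit-str-concat rules (ISC001); a minimal sketch with hypothetical variables:

    name, shape = "A", (3, 4)
    # flagged: implicit concatenation of adjacent literals on one line
    print(f"name: {name}\n" f"shape: {shape}")
    # preferred: a single literal with the same content
    print(f"name: {name}\nshape: {shape}")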

docs/source/tutorial/class_sptensor.ipynb (+8 -1)

@@ -66,7 +66,14 @@
 "outputs": [],
 "source": [
 "subs = np.array(\n",
-"    [[0, 0, 0], [0, 0, 2], [2, 2, 2], [3, 3, 3], [0, 0, 0], [0, 0, 0]]\n",
+"    [\n",
+"        [0, 0, 0],\n",
+"        [0, 0, 2],\n",
+"        [2, 2, 2],\n",
+"        [3, 3, 3],\n",
+"        [0, 0, 0],\n",
+"        [0, 0, 0],\n",
+"    ]\n",
 ")  # (1,1,1) is repeated.\n",
 "vals = np.array([2, 2, 2, 2, 2, 2])[:, None]  # Vals is a column vector.\n",
 "X = ttb.sptensor.from_aggregator(subs, vals)\n",

docs/source/tutorial/class_sumtensor.ipynb (+4 -4)

@@ -160,7 +160,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(f\"Ndims: {T.ndims}\\n\" f\"Shape: {T.shape}\")"
+"print(f\"Ndims: {T.ndims}\\nShape: {T.shape}\")"
 ]
 },
 {
@@ -316,8 +316,8 @@
 "outputs": [],
 "source": [
 "# Equivalent additions despite the order\n",
-"print(f\"T+S:\\n{T+S}\\n\")\n",
-"print(f\"S+T:\\n{S+T}\")"
+"print(f\"T+S:\\n{T + S}\\n\")\n",
+"print(f\"S+T:\\n{S + T}\")"
 ]
 },
 {
@@ -336,7 +336,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(f\"Part 0:\\n{T.parts[0]}\\n\\n\" f\"Part 1:\\n{T.parts[1]}\")"
+"print(f\"Part 0:\\n{T.parts[0]}\\n\\nPart 1:\\n{T.parts[1]}\")"
 ]
 }
 ],

docs/source/tutorial/class_ttensor.ipynb (+3 -3)

@@ -538,7 +538,7 @@
 "# since the random generation process below wasn't expected\n",
 "# to return a low-rank approximation\n",
 "print(\n",
-"    f\"Compression: {X.data.nbytes/(T.core.data.nbytes + np.sum([i.nbytes for i in T.factor_matrices]))} x\"\n",
+"    f\"Compression: {X.data.nbytes / (T.core.data.nbytes + np.sum([i.nbytes for i in T.factor_matrices]))} x\"\n",
 ")"
 ]
 },
@@ -584,7 +584,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(f\"Compression: {Xf.data.nbytes/Xslice.data.nbytes} x\")"
+"print(f\"Compression: {Xf.data.nbytes / Xslice.data.nbytes} x\")"
 ]
 },
 {
@@ -622,7 +622,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(f\"Compression: {Xf.data.nbytes/Xds.data.nbytes} x\")"
+"print(f\"Compression: {Xf.data.nbytes / Xds.data.nbytes} x\")"
 ]
 },
 {

pyttb/gcp/optimizers.py (+1 -1)

@@ -234,7 +234,7 @@ def solve(  # noqa: PLR0913
             "End Main Loop\n"
             f"Final f-est: {f_est: 10.4e}\n"
             f"Main loop time: {main_time: .2f}\n"
-            f"Total iterations: {n_epoch*self._epoch_iters}"
+            f"Total iterations: {n_epoch * self._epoch_iters}"
         )
         logging.info(msg)

pyttb/gcp_opt.py (+1 -1)

@@ -111,7 +111,7 @@ def gcp_opt(  # noqa: PLR0912,PLR0913
     )
     if nmissing > 0:
         welcome_msg += (
-            f"Missing entries: {nmissing} ({100*nmissing/tensor_size:.2g}%)"
+            f"Missing entries: {nmissing} ({100 * nmissing / tensor_size:.2g}%)"
         )
     logging.info(welcome_msg)

pyttb/hosvd.py (+1 -1)

@@ -143,7 +143,7 @@ def hosvd(  # noqa: PLR0912,PLR0913,PLR0915
     relnorm = np.sqrt(diffnormsqr / normxsqr)
     print(f"Shape of core: {G.shape}")
     if relnorm <= tol:
-        print(f"||X-T||/||X|| = {relnorm: g} <=" f"{tol: f} (tol)")
+        print(f"||X-T||/||X|| = {relnorm: g} <={tol: f} (tol)")
     else:
         print(
             "Tolerance not satisfied!! "

pyttb/pyttb_utils.py (+7 -5)

@@ -70,7 +70,10 @@ def tt_union_rows(MatrixA: np.ndarray, MatrixB: np.ndarray) -> np.ndarray:
         MatrixBUnique[np.argsort(idxB)], MatrixAUnique[np.argsort(idxA)]
     )
     union = np.vstack(
-        (MatrixB[np.sort(idxB[np.where(location < 0)])], MatrixA[np.sort(idxA)])
+        (
+            MatrixB[np.sort(idxB[np.where(location < 0)])],
+            MatrixA[np.sort(idxA)],
+        )
     )
     return union
@@ -861,10 +864,9 @@ def gather_wrap_dims(
                 + [i for i in range(ndims - 1, rdims[0], -1)]
             )
         else:
-            assert False, (
-                "Unrecognized value for cdims_cyclic pattern, "
-                'must be "fc" or "bc".'
-            )
+            assert (
+                False
+            ), 'Unrecognized value for cdims_cyclic pattern, must be "fc" or "bc".'
     else:
         # Multiple row mapping
         cdims = np.setdiff1d(alldims, rdims)
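
The gather_wrap_dims hunk is a pure re-wrap: with the message joined onto one line it no longer fits beside the condition, so the formatter parenthesizes and splits the asserted expression instead of the message. Both spellings compile to the same statement; a minimal sketch with a hypothetical condition and message:

    ok = True  # set False to get the identical AssertionError from either form
    assert ok, 'Unrecognized value, must be "fc" or "bc".'
    assert (
        ok
    ), 'Unrecognized value, must be "fc" or "bc".'

Since asserts are stripped under python -O, a raise ValueError(...) would be the more defensive choice here, but this commit only reflows the existing assert.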

pyttb/sptensor.py (+5 -2)

@@ -667,7 +667,7 @@ def extract(self, searchsubs: np.ndarray) -> np.ndarray:
             badsubs = searchsubs[badloc, :]
             for i in np.arange(0, badloc[0].size):
                 error_msg += f"\tsubscript = {np.array2string(badsubs[i, :])} \n"
-            assert False, f"{error_msg}" "Invalid subscripts"
+            assert False, f"{error_msg}Invalid subscripts"

         # Set the default answer to zero
         a = np.zeros(shape=(p, 1), dtype=self.vals.dtype, order=self.order)
@@ -2578,7 +2578,10 @@ def _set_subtensor(self, key, value):  # noqa: PLR0912, PLR0915
                 if self.subs.size > 0:
                     self.subs = np.vstack((self.subs, addsubs.astype(int)))
                     self.vals = np.vstack(
-                        (self.vals, value * np.ones((addsubs.shape[0], 1)))
+                        (
+                            self.vals,
+                            value * np.ones((addsubs.shape[0], 1)),
+                        )
                     )
                 else:
                     self.subs = addsubs.astype(int)
