Skip to content

Commit 578b79f

Browse files
committed
Fix #717: Corrected PCA sign flip and updated test compatibility for Python 3.12
1 parent 4a4a84a commit 578b79f

File tree

134 files changed

+3669
-2447
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

134 files changed

+3669
-2447
lines changed

docs/ipynb2markdown.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,13 +9,10 @@
99

1010
import glob
1111
import os
12-
import shutil
1312
import subprocess
1413

15-
import markdown
1614
from markdown.extensions import Extension
1715
from markdown.treeprocessors import Treeprocessor
18-
from nbconvert.exporters import MarkdownExporter
1916

2017

2118
class ImgExtractor(Treeprocessor):

docs/make_api.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -416,13 +416,13 @@ def summarize_methdods_and_functions(
416416
"-o1",
417417
"--output_module_api",
418418
default="../docs/sources/api_modules",
419-
help=("Target directory for the module-level" " API Markdown files"),
419+
help=("Target directory for the module-level API Markdown files"),
420420
)
421421
parser.add_argument(
422422
"-o2",
423423
"--output_subpackage_api",
424424
default="../docs/sources/api_subpackages",
425-
help=("Target directory for the" " subpackage-level API Markdown files"),
425+
help=("Target directory for the subpackage-level API Markdown files"),
426426
)
427427
parser.add_argument(
428428
"-c", "--clean", action="store_true", help="Remove previous API files"
@@ -459,5 +459,5 @@ def summarize_methdods_and_functions(
459459
out_dir=args.output_subpackage_api,
460460
printlog=not (args.silent),
461461
clean=args.clean,
462-
str_above_header=("mlxtend" " version: %s \n" % (package.__version__)),
462+
str_above_header=("mlxtend version: %s \n" % (package.__version__)),
463463
)

docs/sources/user_guide/classifier/Adaline.ipynb

Lines changed: 53 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -178,22 +178,19 @@
178178
"# Loading Data\n",
179179
"\n",
180180
"X, y = iris_data()\n",
181-
"X = X[:, [0, 3]] # sepal length and petal width\n",
182-
"X = X[0:100] # class 0 and class 1\n",
183-
"y = y[0:100] # class 0 and class 1\n",
181+
"X = X[:, [0, 3]] # sepal length and petal width\n",
182+
"X = X[0:100] # class 0 and class 1\n",
183+
"y = y[0:100] # class 0 and class 1\n",
184184
"\n",
185185
"# standardize\n",
186-
"X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()\n",
187-
"X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()\n",
186+
"X[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()\n",
187+
"X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()\n",
188188
"\n",
189189
"\n",
190-
"ada = Adaline(epochs=30, \n",
191-
" eta=0.01, \n",
192-
" minibatches=None, \n",
193-
" random_seed=1)\n",
190+
"ada = Adaline(epochs=30, eta=0.01, minibatches=None, random_seed=1)\n",
194191
"ada.fit(X, y)\n",
195192
"plot_decision_regions(X, y, clf=ada)\n",
196-
"plt.title('Adaline - Closed Form')\n",
193+
"plt.title(\"Adaline - Closed Form\")\n",
197194
"\n",
198195
"plt.show()"
199196
]
@@ -261,29 +258,31 @@
261258
"# Loading Data\n",
262259
"\n",
263260
"X, y = iris_data()\n",
264-
"X = X[:, [0, 3]] # sepal length and petal width\n",
265-
"X = X[0:100] # class 0 and class 1\n",
266-
"y = y[0:100] # class 0 and class 1\n",
261+
"X = X[:, [0, 3]] # sepal length and petal width\n",
262+
"X = X[0:100] # class 0 and class 1\n",
263+
"y = y[0:100] # class 0 and class 1\n",
267264
"\n",
268265
"# standardize\n",
269-
"X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()\n",
270-
"X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()\n",
266+
"X[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()\n",
267+
"X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()\n",
271268
"\n",
272269
"\n",
273-
"ada = Adaline(epochs=30, \n",
274-
" eta=0.01, \n",
275-
" minibatches=1, # for Gradient Descent Learning\n",
276-
" random_seed=1,\n",
277-
" print_progress=3)\n",
270+
"ada = Adaline(\n",
271+
" epochs=30,\n",
272+
" eta=0.01,\n",
273+
" minibatches=1, # for Gradient Descent Learning\n",
274+
" random_seed=1,\n",
275+
" print_progress=3,\n",
276+
")\n",
278277
"\n",
279278
"ada.fit(X, y)\n",
280279
"plot_decision_regions(X, y, clf=ada)\n",
281-
"plt.title('Adaline - Gradient Descent')\n",
280+
"plt.title(\"Adaline - Gradient Descent\")\n",
282281
"plt.show()\n",
283282
"\n",
284283
"plt.plot(range(len(ada.cost_)), ada.cost_)\n",
285-
"plt.xlabel('Iterations')\n",
286-
"plt.ylabel('Cost')"
284+
"plt.xlabel(\"Iterations\")\n",
285+
"plt.ylabel(\"Cost\")"
287286
]
288287
},
289288
{
@@ -339,29 +338,31 @@
339338
"# Loading Data\n",
340339
"\n",
341340
"X, y = iris_data()\n",
342-
"X = X[:, [0, 3]] # sepal length and petal width\n",
343-
"X = X[0:100] # class 0 and class 1\n",
344-
"y = y[0:100] # class 0 and class 1\n",
341+
"X = X[:, [0, 3]] # sepal length and petal width\n",
342+
"X = X[0:100] # class 0 and class 1\n",
343+
"y = y[0:100] # class 0 and class 1\n",
345344
"\n",
346345
"# standardize\n",
347-
"X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()\n",
348-
"X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()\n",
346+
"X[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()\n",
347+
"X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()\n",
349348
"\n",
350349
"\n",
351-
"ada = Adaline(epochs=15, \n",
352-
" eta=0.02, \n",
353-
" minibatches=len(y), # for SGD learning \n",
354-
" random_seed=1,\n",
355-
" print_progress=3)\n",
350+
"ada = Adaline(\n",
351+
" epochs=15,\n",
352+
" eta=0.02,\n",
353+
" minibatches=len(y), # for SGD learning\n",
354+
" random_seed=1,\n",
355+
" print_progress=3,\n",
356+
")\n",
356357
"\n",
357358
"ada.fit(X, y)\n",
358359
"plot_decision_regions(X, y, clf=ada)\n",
359-
"plt.title('Adaline - Stochastic Gradient Descent')\n",
360+
"plt.title(\"Adaline - Stochastic Gradient Descent\")\n",
360361
"plt.show()\n",
361362
"\n",
362363
"plt.plot(range(len(ada.cost_)), ada.cost_)\n",
363-
"plt.xlabel('Iterations')\n",
364-
"plt.ylabel('Cost')\n",
364+
"plt.xlabel(\"Iterations\")\n",
365+
"plt.ylabel(\"Cost\")\n",
365366
"plt.show()"
366367
]
367368
},
@@ -418,29 +419,31 @@
418419
"# Loading Data\n",
419420
"\n",
420421
"X, y = iris_data()\n",
421-
"X = X[:, [0, 3]] # sepal length and petal width\n",
422-
"X = X[0:100] # class 0 and class 1\n",
423-
"y = y[0:100] # class 0 and class 1\n",
422+
"X = X[:, [0, 3]] # sepal length and petal width\n",
423+
"X = X[0:100] # class 0 and class 1\n",
424+
"y = y[0:100] # class 0 and class 1\n",
424425
"\n",
425426
"# standardize\n",
426-
"X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()\n",
427-
"X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()\n",
427+
"X[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()\n",
428+
"X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()\n",
428429
"\n",
429430
"\n",
430-
"ada = Adaline(epochs=15, \n",
431-
" eta=0.02, \n",
432-
" minibatches=5, # for SGD learning w. minibatch size 20\n",
433-
" random_seed=1,\n",
434-
" print_progress=3)\n",
431+
"ada = Adaline(\n",
432+
" epochs=15,\n",
433+
" eta=0.02,\n",
434+
" minibatches=5, # for SGD learning w. minibatch size 20\n",
435+
" random_seed=1,\n",
436+
" print_progress=3,\n",
437+
")\n",
435438
"\n",
436439
"ada.fit(X, y)\n",
437440
"plot_decision_regions(X, y, clf=ada)\n",
438-
"plt.title('Adaline - Stochastic Gradient Descent w. Minibatches')\n",
441+
"plt.title(\"Adaline - Stochastic Gradient Descent w. Minibatches\")\n",
439442
"plt.show()\n",
440443
"\n",
441444
"plt.plot(range(len(ada.cost_)), ada.cost_)\n",
442-
"plt.xlabel('Iterations')\n",
443-
"plt.ylabel('Cost')\n",
445+
"plt.xlabel(\"Iterations\")\n",
446+
"plt.ylabel(\"Cost\")\n",
444447
"plt.show()"
445448
]
446449
},
@@ -641,7 +644,7 @@
641644
}
642645
],
643646
"source": [
644-
"with open('../../api_modules/mlxtend.classifier/Adaline.md', 'r') as f:\n",
647+
"with open(\"../../api_modules/mlxtend.classifier/Adaline.md\", \"r\") as f:\n",
645648
" print(f.read())"
646649
]
647650
}

0 commit comments

Comments
 (0)