|
178 | 178 | "# Loading Data\n", |
179 | 179 | "\n", |
180 | 180 | "X, y = iris_data()\n", |
181 | | - "X = X[:, [0, 3]] # sepal length and petal width\n", |
182 | | - "X = X[0:100] # class 0 and class 1\n", |
183 | | - "y = y[0:100] # class 0 and class 1\n", |
| 181 | + "X = X[:, [0, 3]] # sepal length and petal width\n", |
| 182 | + "X = X[0:100] # class 0 and class 1\n", |
| 183 | + "y = y[0:100] # class 0 and class 1\n", |
184 | 184 | "\n", |
185 | 185 | "# standardize\n", |
186 | | - "X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()\n", |
187 | | - "X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()\n", |
| 186 | + "X[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()\n", |
| 187 | + "X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()\n", |
188 | 188 | "\n", |
189 | 189 | "\n", |
190 | | - "ada = Adaline(epochs=30, \n", |
191 | | - " eta=0.01, \n", |
192 | | - " minibatches=None, \n", |
193 | | - " random_seed=1)\n", |
| 190 | + "ada = Adaline(epochs=30, eta=0.01, minibatches=None, random_seed=1)\n", |
194 | 191 | "ada.fit(X, y)\n", |
195 | 192 | "plot_decision_regions(X, y, clf=ada)\n", |
196 | | - "plt.title('Adaline - Closed Form')\n", |
| 193 | + "plt.title(\"Adaline - Closed Form\")\n", |
197 | 194 | "\n", |
198 | 195 | "plt.show()" |
199 | 196 | ] |
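Note on the cell above: the title "Adaline - Closed Form" together with `minibatches=None` refers to solving the Adaline sum-of-squared-errors cost analytically instead of iteratively. A minimal, standalone NumPy sketch of that idea via the normal equations (this is illustrative only, not mlxtend's internal code; the 0.5 decision threshold on the linear activation is an assumption for the 0/1 labels used here):

```python
import numpy as np
from mlxtend.data import iris_data

# Same two-feature, two-class subset as in the notebook cell.
X, y = iris_data()
X = X[0:100, [0, 3]]            # sepal length and petal width, classes 0 and 1
y = y[0:100].astype(float)

# Standardize each feature to zero mean and unit variance.
X = (X - X.mean(axis=0)) / X.std(axis=0)

# Closed-form least-squares solution: add a bias column and
# solve the normal equations (Xb^T Xb) w = Xb^T y.
Xb = np.hstack([np.ones((X.shape[0], 1)), X])
w = np.linalg.solve(Xb.T @ Xb, Xb.T @ y)

# Classify by thresholding the linear activation at 0.5.
y_pred = (Xb @ w >= 0.5).astype(int)
print("training accuracy:", (y_pred == y).mean())
```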
|
261 | 258 | "# Loading Data\n", |
262 | 259 | "\n", |
263 | 260 | "X, y = iris_data()\n", |
264 | | - "X = X[:, [0, 3]] # sepal length and petal width\n", |
265 | | - "X = X[0:100] # class 0 and class 1\n", |
266 | | - "y = y[0:100] # class 0 and class 1\n", |
| 261 | + "X = X[:, [0, 3]] # sepal length and petal width\n", |
| 262 | + "X = X[0:100] # class 0 and class 1\n", |
| 263 | + "y = y[0:100] # class 0 and class 1\n", |
267 | 264 | "\n", |
268 | 265 | "# standardize\n", |
269 | | - "X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()\n", |
270 | | - "X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()\n", |
| 266 | + "X[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()\n", |
| 267 | + "X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()\n", |
271 | 268 | "\n", |
272 | 269 | "\n", |
273 | | - "ada = Adaline(epochs=30, \n", |
274 | | - " eta=0.01, \n", |
275 | | - " minibatches=1, # for Gradient Descent Learning\n", |
276 | | - " random_seed=1,\n", |
277 | | - " print_progress=3)\n", |
| 270 | + "ada = Adaline(\n", |
| 271 | + " epochs=30,\n", |
| 272 | + " eta=0.01,\n", |
| 273 | + " minibatches=1, # for Gradient Descent Learning\n", |
| 274 | + " random_seed=1,\n", |
| 275 | + " print_progress=3,\n", |
| 276 | + ")\n", |
278 | 277 | "\n", |
279 | 278 | "ada.fit(X, y)\n", |
280 | 279 | "plot_decision_regions(X, y, clf=ada)\n", |
281 | | - "plt.title('Adaline - Gradient Descent')\n", |
| 280 | + "plt.title(\"Adaline - Gradient Descent\")\n", |
282 | 281 | "plt.show()\n", |
283 | 282 | "\n", |
284 | 283 | "plt.plot(range(len(ada.cost_)), ada.cost_)\n", |
285 | | - "plt.xlabel('Iterations')\n", |
286 | | - "plt.ylabel('Cost')" |
| 284 | + "plt.xlabel(\"Iterations\")\n", |
| 285 | + "plt.ylabel(\"Cost\")" |
287 | 286 | ] |
288 | 287 | }, |
289 | 288 | { |
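For readers of this cell: `minibatches=1` selects full-batch gradient descent, i.e. one weight update per epoch computed from all training samples, and `ada.cost_` records the cost after each epoch for the plot below the call. A hedged sketch of that update rule in plain NumPy (the function name is illustrative and this is not the library's implementation; `eta` and `epochs` mirror the call above):

```python
import numpy as np

def adaline_batch_gd(X, y, eta=0.01, epochs=30, seed=1):
    """Full-batch gradient descent on the Adaline sum-of-squared-errors cost."""
    rng = np.random.RandomState(seed)
    w = rng.normal(scale=0.01, size=X.shape[1])
    b = 0.0
    costs = []
    for _ in range(epochs):
        # One update per epoch, using every training sample at once.
        output = X @ w + b
        errors = y - output
        # Gradient step for the cost 0.5 * sum(errors**2).
        w += eta * X.T @ errors
        b += eta * errors.sum()
        costs.append(0.5 * (errors ** 2).sum())
    return w, b, costs
```

Applied to the standardized `X` and `y` prepared in the cell, the returned `costs` list would show the same kind of monotonically decreasing curve as the `ada.cost_` plot.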
|
339 | 338 | "# Loading Data\n", |
340 | 339 | "\n", |
341 | 340 | "X, y = iris_data()\n", |
342 | | - "X = X[:, [0, 3]] # sepal length and petal width\n", |
343 | | - "X = X[0:100] # class 0 and class 1\n", |
344 | | - "y = y[0:100] # class 0 and class 1\n", |
| 341 | + "X = X[:, [0, 3]] # sepal length and petal width\n", |
| 342 | + "X = X[0:100] # class 0 and class 1\n", |
| 343 | + "y = y[0:100] # class 0 and class 1\n", |
345 | 344 | "\n", |
346 | 345 | "# standardize\n", |
347 | | - "X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()\n", |
348 | | - "X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()\n", |
| 346 | + "X[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()\n", |
| 347 | + "X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()\n", |
349 | 348 | "\n", |
350 | 349 | "\n", |
351 | | - "ada = Adaline(epochs=15, \n", |
352 | | - " eta=0.02, \n", |
353 | | - " minibatches=len(y), # for SGD learning \n", |
354 | | - " random_seed=1,\n", |
355 | | - " print_progress=3)\n", |
| 350 | + "ada = Adaline(\n", |
| 351 | + " epochs=15,\n", |
| 352 | + " eta=0.02,\n", |
| 353 | + " minibatches=len(y), # for SGD learning\n", |
| 354 | + " random_seed=1,\n", |
| 355 | + " print_progress=3,\n", |
| 356 | + ")\n", |
356 | 357 | "\n", |
357 | 358 | "ada.fit(X, y)\n", |
358 | 359 | "plot_decision_regions(X, y, clf=ada)\n", |
359 | | - "plt.title('Adaline - Stochastic Gradient Descent')\n", |
| 360 | + "plt.title(\"Adaline - Stochastic Gradient Descent\")\n", |
360 | 361 | "plt.show()\n", |
361 | 362 | "\n", |
362 | 363 | "plt.plot(range(len(ada.cost_)), ada.cost_)\n", |
363 | | - "plt.xlabel('Iterations')\n", |
364 | | - "plt.ylabel('Cost')\n", |
| 364 | + "plt.xlabel(\"Iterations\")\n", |
| 365 | + "plt.ylabel(\"Cost\")\n", |
365 | 366 | "plt.show()" |
366 | 367 | ] |
367 | 368 | }, |
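In the cell above, `minibatches=len(y)` makes every minibatch a single sample, i.e. classic stochastic gradient descent with one weight update per training example. A rough per-sample update loop under the same assumptions as the previous sketch (illustrative only, not mlxtend's code):

```python
import numpy as np

def adaline_sgd(X, y, eta=0.02, epochs=15, seed=1):
    """Stochastic gradient descent: shuffle each epoch, update once per sample."""
    rng = np.random.RandomState(seed)
    w = rng.normal(scale=0.01, size=X.shape[1])
    b = 0.0
    costs = []
    for _ in range(epochs):
        idx = rng.permutation(len(y))       # reshuffle to decorrelate updates
        epoch_cost = 0.0
        for i in idx:
            error = y[i] - (X[i] @ w + b)   # error on one sample
            w += eta * error * X[i]
            b += eta * error
            epoch_cost += 0.5 * error ** 2
        costs.append(epoch_cost / len(y))   # average per-sample cost this epoch
    return w, b, costs
```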
|
418 | 419 | "# Loading Data\n", |
419 | 420 | "\n", |
420 | 421 | "X, y = iris_data()\n", |
421 | | - "X = X[:, [0, 3]] # sepal length and petal width\n", |
422 | | - "X = X[0:100] # class 0 and class 1\n", |
423 | | - "y = y[0:100] # class 0 and class 1\n", |
| 422 | + "X = X[:, [0, 3]] # sepal length and petal width\n", |
| 423 | + "X = X[0:100] # class 0 and class 1\n", |
| 424 | + "y = y[0:100] # class 0 and class 1\n", |
424 | 425 | "\n", |
425 | 426 | "# standardize\n", |
426 | | - "X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()\n", |
427 | | - "X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()\n", |
| 427 | + "X[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()\n", |
| 428 | + "X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()\n", |
428 | 429 | "\n", |
429 | 430 | "\n", |
430 | | - "ada = Adaline(epochs=15, \n", |
431 | | - " eta=0.02, \n", |
432 | | - " minibatches=5, # for SGD learning w. minibatch size 20\n", |
433 | | - " random_seed=1,\n", |
434 | | - " print_progress=3)\n", |
| 431 | + "ada = Adaline(\n", |
| 432 | + " epochs=15,\n", |
| 433 | + " eta=0.02,\n", |
| 434 | + " minibatches=5, # for SGD learning w. minibatch size 20\n", |
| 435 | + " random_seed=1,\n", |
| 436 | + " print_progress=3,\n", |
| 437 | + ")\n", |
435 | 438 | "\n", |
436 | 439 | "ada.fit(X, y)\n", |
437 | 440 | "plot_decision_regions(X, y, clf=ada)\n", |
438 | | - "plt.title('Adaline - Stochastic Gradient Descent w. Minibatches')\n", |
| 441 | + "plt.title(\"Adaline - Stochastic Gradient Descent w. Minibatches\")\n", |
439 | 442 | "plt.show()\n", |
440 | 443 | "\n", |
441 | 444 | "plt.plot(range(len(ada.cost_)), ada.cost_)\n", |
442 | | - "plt.xlabel('Iterations')\n", |
443 | | - "plt.ylabel('Cost')\n", |
| 445 | + "plt.xlabel(\"Iterations\")\n", |
| 446 | + "plt.ylabel(\"Cost\")\n", |
444 | 447 | "plt.show()" |
445 | 448 | ] |
446 | 449 | }, |
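Here `minibatches=5` splits the 100 training samples into 5 chunks of 20, so each epoch performs 5 weight updates, each computed from 20 samples, which sits between the full-batch and per-sample variants shown above. A hedged NumPy sketch of that splitting logic (illustrative only; the per-batch averaging is an assumption, not necessarily how the library scales its updates):

```python
import numpy as np

def adaline_minibatch_gd(X, y, eta=0.02, epochs=15, n_minibatches=5, seed=1):
    """Minibatch gradient descent: n_minibatches updates per epoch."""
    rng = np.random.RandomState(seed)
    w = rng.normal(scale=0.01, size=X.shape[1])
    b = 0.0
    costs = []
    for _ in range(epochs):
        idx = rng.permutation(len(y))
        epoch_cost = 0.0
        # With 100 samples and n_minibatches=5, each chunk holds 20 samples.
        for batch in np.array_split(idx, n_minibatches):
            errors = y[batch] - (X[batch] @ w + b)
            w += eta * X[batch].T @ errors / len(batch)
            b += eta * errors.mean()
            epoch_cost += 0.5 * (errors ** 2).sum()
        costs.append(epoch_cost / len(y))
    return w, b, costs
```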
|
641 | 644 | } |
642 | 645 | ], |
643 | 646 | "source": [ |
644 | | - "with open('../../api_modules/mlxtend.classifier/Adaline.md', 'r') as f:\n", |
| 647 | + "with open(\"../../api_modules/mlxtend.classifier/Adaline.md\", \"r\") as f:\n", |
645 | 648 | " print(f.read())" |
646 | 649 | ] |
647 | 650 | } |
|