
Commit e4dfa25

Update two class model CV
1 parent b710573 commit e4dfa25

6 files changed: +919 -704 lines

CNN model to Recommend Comperhancive DDIs/.ipynb_checkpoints/P04_select and evaluate CNN on triple DDI(-1,+1)-checkpoint.ipynb (+435 -39)
Large diffs are not rendered by default.

CNN model to Recommend Comperhancive DDIs/.ipynb_checkpoints/P05_10fold CV on CNN on deg and enh DDI.py-checkpoint.ipynb (+28 -131)
@@ -2,7 +2,7 @@
 "cells": [
 {
 "cell_type": "code",
-"execution_count": 14,
+"execution_count": 1,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -17,13 +17,14 @@
 "from sklearn.metrics import confusion_matrix,classification_report,precision_score\n",
 "import keras\n",
 "\n",
+"from keras.utils import to_categorical\n",
 "from keras.models import Sequential\n",
 "from keras.layers import Dense, Conv2D, Flatten, Softmax, Dropout"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 15,
+"execution_count": 2,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -32,7 +33,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 16,
+"execution_count": 3,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -47,133 +48,29 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
-"metadata": {
-"scrolled": false
-},
+"execution_count": 4,
+"metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "0 4269\n",
-"Model: \"sequential_14\"\n",
-"_________________________________________________________________\n",
-"Layer (type) Output Shape Param # \n",
-"=================================================================\n",
-"conv2d_40 (Conv2D) (None, 13, 68, 128) 2176 \n",
-"_________________________________________________________________\n",
-"conv2d_41 (Conv2D) (None, 10, 65, 32) 65568 \n",
-"_________________________________________________________________\n",
-"conv2d_42 (Conv2D) (None, 7, 62, 8) 4104 \n",
-"_________________________________________________________________\n",
-"flatten_14 (Flatten) (None, 3472) 0 \n",
-"_________________________________________________________________\n",
-"dense_40 (Dense) (None, 1024) 3556352 \n",
-"_________________________________________________________________\n",
-"dropout_14 (Dropout) (None, 1024) 0 \n",
-"_________________________________________________________________\n",
-"dense_41 (Dense) (None, 64) 65600 \n",
-"_________________________________________________________________\n",
-"dense_42 (Dense) (None, 2) 130 \n",
-"_________________________________________________________________\n",
-"softmax_14 (Softmax) (None, 2) 0 \n",
-"=================================================================\n",
-"Total params: 3,693,930\n",
-"Trainable params: 3,693,930\n",
-"Non-trainable params: 0\n",
-"_________________________________________________________________\n",
-"Train on 38432 samples, validate on 4270 samples\n",
-"Epoch 1/5\n",
-"38432/38432 [==============================] - 428s 11ms/step - loss: 0.3536 - acc: 0.8413 - val_loss: 0.2844 - val_acc: 0.8696\n",
-"Epoch 2/5\n",
-"38432/38432 [==============================] - 413s 11ms/step - loss: 0.2116 - acc: 0.8988 - val_loss: 0.2357 - val_acc: 0.8925\n",
-"Epoch 3/5\n",
-"38432/38432 [==============================] - 409s 11ms/step - loss: 0.1415 - acc: 0.9365 - val_loss: 0.2136 - val_acc: 0.9281\n",
-"Epoch 4/5\n",
-"38432/38432 [==============================] - 410s 11ms/step - loss: 0.0970 - acc: 0.9614 - val_loss: 0.1996 - val_acc: 0.9391\n",
-"Epoch 5/5\n",
-"38432/38432 [==============================] - 400s 10ms/step - loss: 0.0701 - acc: 0.9722 - val_loss: 0.2091 - val_acc: 0.9405\n",
-"[[1.5804605e-01 8.4195399e-01]\n",
-" [8.7760374e-02 9.1223961e-01]\n",
-" [1.2026753e-07 9.9999988e-01]\n",
-" [4.8302559e-07 9.9999952e-01]]\n",
-"0.9316051153491909 0.9751231036347594\n",
-"0.9923376598970335 0.9751242356965608\n",
-"4270 8539\n",
-"Model: \"sequential_15\"\n",
-"_________________________________________________________________\n",
-"Layer (type) Output Shape Param # \n",
-"=================================================================\n",
-"conv2d_43 (Conv2D) (None, 13, 68, 128) 2176 \n",
-"_________________________________________________________________\n",
-"conv2d_44 (Conv2D) (None, 10, 65, 32) 65568 \n",
-"_________________________________________________________________\n",
-"conv2d_45 (Conv2D) (None, 7, 62, 8) 4104 \n",
-"_________________________________________________________________\n",
-"flatten_15 (Flatten) (None, 3472) 0 \n",
-"_________________________________________________________________\n",
-"dense_43 (Dense) (None, 1024) 3556352 \n",
-"_________________________________________________________________\n",
-"dropout_15 (Dropout) (None, 1024) 0 \n",
-"_________________________________________________________________\n",
-"dense_44 (Dense) (None, 64) 65600 \n",
-"_________________________________________________________________\n",
-"dense_45 (Dense) (None, 2) 130 \n",
-"_________________________________________________________________\n",
-"softmax_15 (Softmax) (None, 2) 0 \n",
-"=================================================================\n",
-"Total params: 3,693,930\n",
-"Trainable params: 3,693,930\n",
-"Non-trainable params: 0\n",
-"_________________________________________________________________\n",
-"Train on 38432 samples, validate on 4270 samples\n",
-"Epoch 1/5\n",
-"38432/38432 [==============================] - 418s 11ms/step - loss: 0.3661 - acc: 0.8375 - val_loss: 0.2760 - val_acc: 0.8735\n",
-"Epoch 2/5\n",
-"38432/38432 [==============================] - 492s 13ms/step - loss: 0.2064 - acc: 0.9122 - val_loss: 0.1927 - val_acc: 0.9255\n",
-"Epoch 3/5\n",
-"38432/38432 [==============================] - 393s 10ms/step - loss: 0.1194 - acc: 0.9511 - val_loss: 0.1704 - val_acc: 0.9321\n",
-"Epoch 4/5\n",
-"38432/38432 [==============================] - 404s 11ms/step - loss: 0.0718 - acc: 0.9698 - val_loss: 0.2004 - val_acc: 0.9309\n",
-"Epoch 5/5\n",
-"38432/38432 [==============================] - 462s 12ms/step - loss: 0.0447 - acc: 0.9825 - val_loss: 0.1795 - val_acc: 0.9480\n",
-"[[5.8844958e-02 9.4115502e-01]\n",
-" [5.1369634e-02 9.4863033e-01]\n",
-" [8.2593874e-07 9.9999917e-01]\n",
-" [8.7683333e-07 9.9999917e-01]]\n",
-"0.9490972107001205 0.9805643929169495\n",
-"0.9942582347070612 0.9805645567065966\n",
-"8540 12809\n",
-"Model: \"sequential_16\"\n",
-"_________________________________________________________________\n",
-"Layer (type) Output Shape Param # \n",
-"=================================================================\n",
-"conv2d_46 (Conv2D) (None, 13, 68, 128) 2176 \n",
-"_________________________________________________________________\n",
-"conv2d_47 (Conv2D) (None, 10, 65, 32) 65568 \n",
-"_________________________________________________________________\n",
-"conv2d_48 (Conv2D) (None, 7, 62, 8) 4104 \n",
-"_________________________________________________________________\n",
-"flatten_16 (Flatten) (None, 3472) 0 \n",
-"_________________________________________________________________\n",
-"dense_46 (Dense) (None, 1024) 3556352 \n",
-"_________________________________________________________________\n",
-"dropout_16 (Dropout) (None, 1024) 0 \n",
-"_________________________________________________________________\n",
-"dense_47 (Dense) (None, 64) 65600 \n",
-"_________________________________________________________________\n",
-"dense_48 (Dense) (None, 2) 130 \n",
-"_________________________________________________________________\n",
-"softmax_16 (Softmax) (None, 2) 0 \n",
-"=================================================================\n",
-"Total params: 3,693,930\n",
-"Trainable params: 3,693,930\n",
-"Non-trainable params: 0\n",
-"_________________________________________________________________\n",
-"Train on 38432 samples, validate on 4270 samples\n",
-"Epoch 1/5\n",
-" 2144/38432 [>.............................] - ETA: 8:00 - loss: 0.5269 - acc: 0.7836"
+"[1. 1. 0. 0. 1.] [0. 0. 1. 1. 0.]\n"
+]
+},
+{
+"ename": "InvalidArgumentError",
+"evalue": "Exception encountered when calling layer \"softmax\" (type Softmax).\n\n`dim` must be in the range [-2, 2) where 2 is the number of dimensions in the input. Received: dim=128\n\nCall arguments received by layer \"softmax\" (type Softmax):\n • inputs=tf.Tensor(shape=(None, 2), dtype=float32)\n • mask=None",
+"output_type": "error",
+"traceback": [
+"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+"\u001b[1;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)",
+"Cell \u001b[1;32mIn[4], line 55\u001b[0m\n\u001b[0;32m 53\u001b[0m model\u001b[38;5;241m.\u001b[39madd(Dense( \u001b[38;5;241m64\u001b[39m, activation\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mrelu\u001b[39m\u001b[38;5;124m'\u001b[39m))\n\u001b[0;32m 54\u001b[0m model\u001b[38;5;241m.\u001b[39madd(Dense( \u001b[38;5;241m2\u001b[39m, activation\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mrelu\u001b[39m\u001b[38;5;124m'\u001b[39m))\n\u001b[1;32m---> 55\u001b[0m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43madd\u001b[49m\u001b[43m(\u001b[49m\u001b[43mSoftmax\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m128\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 56\u001b[0m \u001b[38;5;66;03m# model.summary()\u001b[39;00m\n\u001b[0;32m 57\u001b[0m \n\u001b[0;32m 58\u001b[0m \n\u001b[0;32m 59\u001b[0m \u001b[38;5;66;03m#compile model using accuracy to measure model performance\u001b[39;00m\n\u001b[0;32m 60\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mkeras\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m optimizers\n",
+"File \u001b[1;32mc:\\program files\\python38\\lib\\site-packages\\tensorflow\\python\\trackable\\base.py:204\u001b[0m, in \u001b[0;36mno_automatic_dependency_tracking.<locals>._method_wrapper\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 202\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_self_setattr_tracking \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m \u001b[38;5;66;03m# pylint: disable=protected-access\u001b[39;00m\n\u001b[0;32m 203\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 204\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43mmethod\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 205\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[0;32m 206\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_self_setattr_tracking \u001b[38;5;241m=\u001b[39m previous_value \u001b[38;5;66;03m# pylint: disable=protected-access\u001b[39;00m\n",
+"File \u001b[1;32mc:\\program files\\python38\\lib\\site-packages\\keras\\src\\utils\\traceback_utils.py:70\u001b[0m, in \u001b[0;36mfilter_traceback.<locals>.error_handler\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 67\u001b[0m filtered_tb \u001b[38;5;241m=\u001b[39m _process_traceback_frames(e\u001b[38;5;241m.\u001b[39m__traceback__)\n\u001b[0;32m 68\u001b[0m \u001b[38;5;66;03m# To get the full stack trace, call:\u001b[39;00m\n\u001b[0;32m 69\u001b[0m \u001b[38;5;66;03m# `tf.debugging.disable_traceback_filtering()`\u001b[39;00m\n\u001b[1;32m---> 70\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\u001b[38;5;241m.\u001b[39mwith_traceback(filtered_tb) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m 71\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[0;32m 72\u001b[0m \u001b[38;5;28;01mdel\u001b[39;00m filtered_tb\n",
+"File \u001b[1;32mc:\\program files\\python38\\lib\\site-packages\\keras\\src\\backend.py:5444\u001b[0m, in \u001b[0;36msoftmax\u001b[1;34m(x, axis)\u001b[0m\n\u001b[0;32m 5430\u001b[0m \u001b[38;5;129m@keras_export\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mkeras.backend.softmax\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 5431\u001b[0m \u001b[38;5;129m@tf\u001b[39m\u001b[38;5;241m.\u001b[39m__internal__\u001b[38;5;241m.\u001b[39mdispatch\u001b[38;5;241m.\u001b[39madd_dispatch_support\n\u001b[0;32m 5432\u001b[0m \u001b[38;5;129m@doc_controls\u001b[39m\u001b[38;5;241m.\u001b[39mdo_not_generate_docs\n\u001b[0;32m 5433\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21msoftmax\u001b[39m(x, axis\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m):\n\u001b[0;32m 5434\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Softmax of a tensor.\u001b[39;00m\n\u001b[0;32m 5435\u001b[0m \n\u001b[0;32m 5436\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 5442\u001b[0m \u001b[38;5;124;03m A tensor.\u001b[39;00m\n\u001b[0;32m 5443\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[1;32m-> 5444\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mtf\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mnn\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msoftmax\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maxis\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43maxis\u001b[49m\u001b[43m)\u001b[49m\n",
+"\u001b[1;31mInvalidArgumentError\u001b[0m: Exception encountered when calling layer \"softmax\" (type Softmax).\n\n`dim` must be in the range [-2, 2) where 2 is the number of dimensions in the input. Received: dim=128\n\nCall arguments received by layer \"softmax\" (type Softmax):\n • inputs=tf.Tensor(shape=(None, 2), dtype=float32)\n • mask=None"
 ]
 }
 ],
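Note on the new error output: the traceback shows it comes from `model.add(Softmax(128))` in the training cell. The first positional argument of `keras.layers.Softmax` is `axis`, and axis 128 is out of range for the (None, 2) logits produced by the final Dense layer, hence the InvalidArgumentError. Below is a minimal, self-contained sketch of a two-class head that avoids the error; it is an illustration only, not the committed fix, and the `input_shape` is a hypothetical stand-in for the flattened feature size.

    from keras.models import Sequential
    from keras.layers import Dense, Softmax

    # Simplified two-class head: Softmax(128) asks for axis 128 on a (None, 2)
    # tensor and fails; the default axis=-1 normalises over the class dimension.
    head = Sequential()
    head.add(Dense(64, activation='relu', input_shape=(3472,)))  # hypothetical input size
    head.add(Dense(2))                # raw scores for the two DDI classes
    head.add(Softmax(axis=-1))        # valid axis for a (None, 2) output

Equivalently, the explicit Softmax layer can be dropped by writing the last layer as `Dense(2, activation='softmax')`.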
@@ -201,8 +98,8 @@
 " testNum = len(X_test)\n",
 "\n",
 " #reshape data to fit model\n",
-" X_train = X_train.reshape(trainNum,16,71,1)\n",
-" X_test = X_test.reshape(testNum,16,71,1)\n",
+" X_train = X_train.reshape(trainNum,16,71,1).astype('float32')\n",
+" X_test = X_test.reshape(testNum,16,71,1).astype('float32')\n",
 "\n",
 " y_train = y_train + 1\n",
 " y_test = y_test + 1\n",
@@ -212,8 +109,8 @@
 "\n",
 " from keras.utils import to_categorical\n",
 " #one-hot encode target column\n",
-" y_train = to_categorical(y_train)\n",
-" y_test = to_categorical(y_test)\n",
+" y_train = to_categorical(y_train).astype(int)\n",
+" y_test = to_categorical(y_test).astype(int)\n",
 " # y_test[0]\n",
 " #create model\n",
 " model = Sequential()\n",
@@ -374,7 +271,7 @@
 ],
 "metadata": {
 "kernelspec": {
-"display_name": "Python 3",
+"display_name": "Python 3 (ipykernel)",
 "language": "python",
 "name": "python3"
 },
@@ -388,9 +285,9 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.6.9"
+"version": "3.8.10"
 }
 },
 "nbformat": 4,
-"nbformat_minor": 2
+"nbformat_minor": 4
 }
