# Main_inductive_SVM.py
# Baseline inductive SVM / random-forest link-prediction script (GRGNN).
# NOTE(review): removed GitHub page-scrape residue (UI text and gutter line
# numbers) that preceded the actual source and made the file unparseable.
import torch
import numpy as np
import sys, copy, math, time, pdb
import pickle as cPickle
#import cPickle as pickle
import scipy.io as sio
import scipy.sparse as ssp
import os.path
import random
import argparse
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.svm import LinearSVC
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
from inspect import signature
sys.path.append('%s/software/pytorch_DGCNN' % os.path.dirname(os.path.realpath(__file__)))
from main import *
from util_functions import *
# ---- Command-line arguments and RNG seeding ----
parser = argparse.ArgumentParser(description='baseline SVM to compare with GRGNN')
# general settings
parser.add_argument('--traindata-name', default='data3', help='train network name')
parser.add_argument('--traindata-name2', default=None, help='train network name2')
parser.add_argument('--testdata-name', default='data4', help='test network name')
parser.add_argument('--max-train-num', type=int, default=100000,
                    help='set maximum number of train links (to fit into memory)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
# Pearson correlation
parser.add_argument('--embedding-dim', type=int, default=1,
                    help='embedding dimmension')
parser.add_argument('--pearson_net', type=float, default=0.8,
                    help='pearson correlation as the network')
# parser.add_argument('--pearson_net', type=int, default=3,
#                     help='pearson correlation as the network')
# model settings
parser.add_argument('--hop', default=0, metavar='S',
                    help='enclosing subgraph hop number, \
                    options: 1, 2,..., "auto"')
parser.add_argument('--max-nodes-per-hop', default=None,
                    help='if > 0, upper bound the # nodes per hop by subsampling')
parser.add_argument('--use-embedding', action='store_true', default=False,
                    help='whether to use node2vec node embeddings')
parser.add_argument('--use-attribute', action='store_true', default=True,
                    help='whether to use node attributes')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
print(args)
# BUG FIX: these three calls previously seeded from cmd_args.seed, a parser
# pulled in by the wildcard `from main import *` of the DGCNN package, which
# silently ignored this script's own --seed flag (torch was even re-seeded
# with a different value right after torch.manual_seed(args.seed) above).
# Seed every RNG consistently from args.seed for reproducibility.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# --hop arrives as a string; keep the literal 'auto' sentinel, otherwise
# convert to int. Same string-to-int coercion for --max-nodes-per-hop.
if args.hop != 'auto':
    args.hop = int(args.hop)
if args.max_nodes_per_hop is not None:
    args.max_nodes_per_hop = int(args.max_nodes_per_hop)
'''Prepare data'''
# BUG FIX: the original passed the *string literal* '__file__' to
# os.path.realpath, which resolves "<cwd>/__file__" and therefore sets
# args.file_dir to the current working directory — only correct by accident
# when the script is launched from its own folder. Use the real module path,
# matching the sys.path.append(os.path.dirname(os.path.realpath(__file__)))
# call at the top of this file.
args.file_dir = os.path.dirname(os.path.realpath(__file__))
# Per-dataset count of transcription-factor (TF) rows at the top of the
# expression matrix; passed as TF_num to sample_neg_TF / genenet_attribute.
# data1: top 195 are TF
# data3: top 334 are TF
# data4: top 333 are TF
# Human: top 745 are TF
dreamTFdict = {
    'data1': 195,
    'data3': 334,
    'data4': 333,
    'Human': 745,
}
# Inductive learning
# For 1vs 1
# Train on one network, test on a different one. Loads the gold-standard
# network (.csc), node features (.allx) and a precomputed Pearson-correlation
# network for both datasets, samples positive/negative links, and turns each
# link into an enclosing-subgraph feature vector for the SVM baseline.
if args.traindata_name is not None:
    # Select data name
    # Dataset names may carry a suffix after '_'; the '_'-split prefix keys
    # dreamTFdict and the .allx / pmatrix file names.
    trdata_name = args.traindata_name.split('_')[0]
    tedata_name = args.testdata_name.split('_')[0]
    # Prepare Training
    # The .csc/.allx files hold pickled SciPy sparse matrices, hence
    # allow_pickle=True on every np.load below.
    trainNet_ori = np.load(os.path.join(args.file_dir, 'data/dream/ind.{}.csc'.format(args.traindata_name)),allow_pickle=True)
    trainGroup = np.load(os.path.join(args.file_dir, 'data/dream/ind.{}.allx'.format(trdata_name)),allow_pickle=True)
    # Observed network = Pearson-correlation graph thresholded at
    # args.pearson_net (file precomputed elsewhere); .tolist() unwraps the
    # pickled object stored in the 0-d npy array.
    trainNet = np.load(args.file_dir+'/data/dream/'+trdata_name+'_pmatrix_'+str(args.pearson_net)+'.npy',allow_pickle=True).tolist()
    # trainNet = np.load(args.file_dir+'/data/dream/'+trdata_name+'_mmatrix_'+str(args.mutual_net)+'.npy',allow_pickle=True).tolist()
    allx =trainGroup.toarray().astype('float32')
    #deal with the features:
    # genenet_attribute builds per-node attribute vectors; the TF count tells
    # it how many leading rows are transcription factors.
    trainAttributes = genenet_attribute(allx,dreamTFdict[trdata_name])
    # Prepare Testing
    testNet_ori = np.load(os.path.join(args.file_dir, 'data/dream/ind.{}.csc'.format(args.testdata_name)),allow_pickle=True)
    testGroup = np.load(os.path.join(args.file_dir, 'data/dream/ind.{}.allx'.format(tedata_name)),allow_pickle=True)
    testNet = np.load(args.file_dir+'/data/dream/'+tedata_name+'_pmatrix_'+str(args.pearson_net)+'.npy',allow_pickle=True).tolist()
    # testNet = np.load(args.file_dir+'/data/dream/'+tedata_name+'_mmatrix_'+str(args.mutual_net)+'.npy',allow_pickle=True).tolist()
    allxt =testGroup.toarray().astype('float32')
    #deal with the features:
    testAttributes = genenet_attribute(allxt,dreamTFdict[tedata_name])
    # Sample TF-restricted link sets: ratio 0.0 keeps everything as training
    # links, ratio 1.0 keeps everything as test links (inductive split).
    # train_pos, train_neg, _, _ = sample_neg(trainNet_ori, 0.0, max_train_num=args.max_train_num)
    train_pos, train_neg, _, _ = sample_neg_TF(trainNet_ori, 0.0, TF_num=dreamTFdict[trdata_name], max_train_num=args.max_train_num)
    #_, _, test_pos, test_neg = sample_neg(testNet_ori, 1.0, max_train_num=args.max_train_num)
    _, _, test_pos, test_neg = sample_neg_TF(testNet_ori, 1.0, TF_num=dreamTFdict[tedata_name], max_train_num=args.max_train_num)
    # test_pos, test_neg = sample_neg_all_TF(testNet_ori, TF_num=dreamTFdict[args.testdata_name])
    '''Train and apply classifier'''
    Atrain = trainNet.copy() # the observed network
    Atest = testNet.copy() # the observed network
    # Zero both (i, j) and (j, i) so held-out test edges are invisible when
    # extracting enclosing subgraphs (assumes test_pos is a (rows, cols)
    # index pair and the matrix is treated as symmetric — TODO confirm).
    Atest[test_pos[0], test_pos[1]] = 0 # mask test links
    Atest[test_pos[1], test_pos[0]] = 0 # mask test links
    train_node_information = None
    test_node_information = None
    if args.use_embedding:
        # NOTE(review): the True/neg-links arguments to
        # generate_node2vec_embeddings mirror the original call; semantics
        # live in util_functions — the trailing '#?' was the author's own
        # uncertainty marker.
        train_embeddings = generate_node2vec_embeddings(Atrain, args.embedding_dim, True, train_neg) #?
        train_node_information = train_embeddings
        test_embeddings = generate_node2vec_embeddings(Atest, args.embedding_dim, True, test_neg) #?
        test_node_information = test_embeddings
    if args.use_attribute and trainAttributes is not None:
        # Node features = [node2vec embedding | attributes] when both are on,
        # otherwise attributes alone.
        if train_node_information is not None:
            train_node_information = np.concatenate([train_node_information, trainAttributes], axis=1)
            test_node_information = np.concatenate([test_node_information, testAttributes], axis=1)
        else:
            train_node_information = trainAttributes
            test_node_information = testAttributes
    # Convert each sampled link into a flat enclosing-subgraph feature vector
    # plus a 0/1 label, for both the train and test networks.
    train_graphs, test_graphs, train_labels, test_labels = links2subgraphsTranSVM(Atrain, Atest, train_pos, train_neg, test_pos, test_neg, args.hop, args.max_nodes_per_hop, train_node_information, test_node_information)
# For 2 vs 1
# Optional second training network: its subgraph features and labels are
# appended to the first training set so the classifier trains on two
# networks and is evaluated on the (unchanged) test network.
if args.traindata_name2 is not None:
    # BUG FIX: these three np.load calls read the same pickled sparse-matrix
    # file formats as the 1-vs-1 branch above but omitted allow_pickle=True,
    # so this branch raised ValueError on NumPy >= 1.16.3. Pass
    # allow_pickle=True for consistency with the earlier loads.
    trainNet2_ori = np.load(os.path.join(args.file_dir, 'data/dream/ind.{}.csc'.format(args.traindata_name2)),allow_pickle=True)
    trainGroup2 = np.load(os.path.join(args.file_dir, 'data/dream/ind.{}.allx'.format(args.traindata_name2)),allow_pickle=True)
    trainNet2 = np.load(args.file_dir+'/data/dream/'+args.traindata_name2+'_pmatrix_'+str(args.pearson_net)+'.npy',allow_pickle=True).tolist()
    # trainNet2 = np.load(args.file_dir+'/data/dream/'+args.traindata_name2+'_mmatrix_'+str(args.pearson_net)+'.npy').tolist()
    allx2 =trainGroup2.toarray().astype('float32')
    #deal with the features:
    # NOTE(review): unlike the 1-vs-1 branch, this keys dreamTFdict (and the
    # file names above) with the full --traindata-name2 string instead of its
    # '_'-split prefix — verify suffixed names are never passed here.
    trainAttributes2 = genenet_attribute(allx2,dreamTFdict[args.traindata_name2])
    # NOTE(review): uses sample_neg (all node pairs) while the first training
    # set uses sample_neg_TF (TF-restricted) — confirm this asymmetry is
    # intended before relying on mixed-network results.
    train_pos2, train_neg2, _, _ = sample_neg(trainNet2_ori, 0.0, max_train_num=args.max_train_num)
    Atrain2 = trainNet2.copy() # the observed network
    train_node_information2 = None
    if args.use_embedding:
        train_embeddings2 = generate_node2vec_embeddings(Atrain2, args.embedding_dim, True, train_neg2) #?
        train_node_information2 = train_embeddings2
    if args.use_attribute and trainAttributes2 is not None:
        if train_node_information2 is not None:
            train_node_information2 = np.concatenate([train_node_information2, trainAttributes2], axis=1)
        else:
            train_node_information2 = trainAttributes2
    # Build subgraph feature vectors for the second network and merge them
    # into the first training set; the test split is reused unchanged.
    train_graphs2, _, train_labels2, _ = links2subgraphsTranSVM(Atrain2, Atest, train_pos2, train_neg2, test_pos, test_neg, args.hop, args.max_nodes_per_hop, train_node_information2, test_node_information)
    train_graphs = train_graphs + train_graphs2
    train_labels = train_labels + train_labels2
    if train_node_information is not None:
        train_node_information = np.concatenate([train_node_information, train_node_information2], axis=0)
# ---- Train the baseline classifiers and evaluate on the test network ----
print('# train: %d, # test: %d' % (len(train_graphs), len(test_graphs)))
X = np.asarray(train_graphs)
y = np.asarray(train_labels)
testx = np.asarray(test_graphs)
true_y = np.asarray(test_labels)
# SVM with RBF kernel; gamma='scale' = 1 / (n_features * X.var()).
# clf = LinearSVC()
clf = svm.SVC(gamma='scale')
# clf = svm.SVC()
clf.fit(X, y)
pred = clf.predict(testx)
# decision_function gives a 1-D signed margin per test link, used as the
# ranking score for downstream PR/ROC analysis.
y_score = clf.decision_function(testx)
print(classification_report(true_y, pred))
# Tab-separated counts in the order tp / fp / tn / fn (note: NOT the
# tn, fp, fn, tp order that ravel() returns).
tn, fp, fn, tp = confusion_matrix(true_y, pred).ravel()
print(str(tp)+"\t"+str(fp)+"\t"+str(tn)+"\t"+str(fn))
np.save('svm_true_y_'+args.testdata_name+'_all.npy',true_y)
np.save('svm_y_score_'+args.testdata_name+'_all.npy',y_score)
# precision, recall, _ = precision_recall_curve(true_y, y_score)
# # plot no skill
# plt.plot([0, 1], [0.5, 0.5], linestyle='--')
# # plot the precision-recall curve for the model
# plt.plot(recall, precision, marker='.')
# # show the plot
# #plt.show()
# plt.savefig('SVM_34_e.png')
#randomforest
# (Determinism relies on the np.random.seed call earlier in the script,
# since no explicit random_state is passed.)
rf = RandomForestClassifier()
rf.fit(X, y)
pred = rf.predict(testx)
# BUG FIX: predict_proba returns an (n_samples, n_classes) array; saving it
# whole made 'rf_y_score_*' a 2-D file, incompatible with the 1-D SVM scores
# saved under the parallel filename above and with precision_recall_curve.
# Keep only the positive-class probability column.
y_score = rf.predict_proba(testx)[:, 1]
print(classification_report(true_y, pred))
tn, fp, fn, tp = confusion_matrix(true_y, pred).ravel()
print(str(tp)+"\t"+str(fp)+"\t"+str(tn)+"\t"+str(fn))
np.save('rf_true_y_'+args.testdata_name+'_all.npy',true_y)
np.save('rf_y_score_'+args.testdata_name+'_all.npy',y_score)