-
Notifications
You must be signed in to change notification settings - Fork 28
/
trainclassifier.py
executable file
·136 lines (112 loc) · 5.23 KB
/
trainclassifier.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import argparse, sys, pickle
from sklearn.cross_decomposition import PLSRegression
from sklearn.linear_model import *
from sklearn.model_selection import KFold
from sklearn.metrics import *
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
def scoremodel(model, x, y):
    '''Return fitness of model as AUC.

    model: fitted sklearn-style classifier
    x: feature matrix; y: binary labels

    Uses a continuous decision score when the model provides one
    (decision_function or predict_proba), because roc_auc_score on hard
    predict() labels reduces the ROC curve to a single threshold and
    underestimates discrimination.  Falls back to predict() for models
    exposing neither.
    '''
    if hasattr(model, 'decision_function'):
        p = model.decision_function(x)
    elif hasattr(model, 'predict_proba'):
        p = model.predict_proba(x)[:, 1]  # probability of the positive class
    else:
        p = model.predict(x)
    return roc_auc_score(y, np.asarray(p).squeeze())
def trainmodels(m, x, y):
    '''For the model type m ('knn', 'svm', 'logistic', or 'rf'), train a
    classifier on x->y, using cross-validation to choose hyperparameters.

    Returns (model, unfit): model is fit on the full data with the best
    parameters; unfit is a fresh estimator configured with the same
    parameters, suitable for external cross-validation.

    Raises ValueError for an unrecognized model type (previously this
    fell through and raised UnboundLocalError at the return).
    '''
    if m == 'knn':
        # Manually cross-validate to choose the number of neighbors;
        # KNeighborsClassifier has no built-in CV variant.
        kf = KFold(n_splits=3)
        bestscore = -np.inf  # any real AUC beats this sentinel
        besti = 1
        for i in range(1, 10):
            knn = KNeighborsClassifier(n_neighbors=i)
            scores = []
            # TODO: parallelize below
            for train, test in kf.split(x):
                knn.fit(x[train], y[train])
                scores.append(scoremodel(knn, x[test], y[test]))
            ave = np.mean(scores)
            if ave > bestscore:
                bestscore = ave
                besti = i
        model = KNeighborsClassifier(n_neighbors=besti)
        model.fit(x, y)
        print("Best k = %d" % besti)
        # choose number of neighbors using full data - iffy
        unfit = KNeighborsClassifier(n_neighbors=besti)
    elif m == 'svm':
        C_range = np.logspace(-2, 3, 6)
        gamma_range = np.logspace(-9, 3, 7)
        param_grid = dict(gamma=gamma_range, C=C_range)
        # score by AUC for consistency with the other model types
        # (previously this silently optimized the default, accuracy)
        grid = GridSearchCV(SVC(), param_grid=param_grid,
                            scoring='roc_auc', n_jobs=-1)
        grid.fit(x, y)
        print("svm params", grid.best_params_)
        model = grid.best_estimator_
        unfit = SVC(**grid.best_params_)
    elif m == 'logistic':
        model = LogisticRegressionCV(n_jobs=-1)
        model.fit(x, y)
        unfit = LogisticRegressionCV(n_jobs=-1)
    elif m == 'rf':
        # evaluate max depths up to log2 of the feature count
        parameters = {'max_depth': range(2, int(np.log2(len(x[0]))) + 1)}
        # scoring must be passed by keyword in modern scikit-learn
        clf = GridSearchCV(RandomForestClassifier(), parameters,
                           scoring='roc_auc', n_jobs=-1)
        clf.fit(x, y)
        model = clf.best_estimator_
        print("max_depth =", clf.best_params_['max_depth'])
        unfit = RandomForestClassifier(**clf.best_params_)
    else:
        raise ValueError("Unknown model type: %s" % m)
    return (model, unfit)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Train linear model from fingerprint file')
parser.add_argument('input',help='Fingerprints input file')
parser.add_argument('-o','--outfile', type=argparse.FileType('wb'), help="Output file for model (trained on full data)")
parser.add_argument('-k','--kfolds',type=int,default=3,help="Number of folds for cross-validation")
parser.add_argument('-y','--labels',help="Labels (y-values). Will override any specified in fingerprints file")
models = parser.add_mutually_exclusive_group()
models.add_argument('--svm',action='store_const',dest='model',const='svm',help="Use support vector machine (rbf kernel)")
models.add_argument('--knn',action='store_const',dest='model',const='knn',help="Use k-nearest neighbors")
models.add_argument('--rf',action='store_const',dest='model',const='rf',help="Use random forest")
models.add_argument('--logistic',action='store_const',dest='model',const='logistic',help="Use logistic regression")
parser.set_defaults(model='knn')
args = parser.parse_args()
#out = args.outfile
comp = 'gzip' if args.input.endswith('.gz') else None
data = pd.read_csv(args.input,compression=comp,header=None,delim_whitespace=True)
if args.labels: #override what is in fingerprint file
y = np.genfromtxt(args.labels,np.float)
if len(y) != len(data):
print("Mismatched length between affinities and fingerprints (%d vs %d)" % (len(y),len(x)))
sys.exit(-1)
data.iloc[:,1] = y
np.random.seed(0) #I like reproducible results, so fix a seed
data = data.iloc[np.random.permutation(len(data))] #shuffle order of data
smi = np.array(data.iloc[:,0])
y = np.array(data.iloc[:,1],dtype=np.float)
x = np.array(data.iloc[:,2:],dtype=np.float)
del data #dispose of pandas copy
(fit,unfit) = trainmodels(args.model, x, y)
fitscore = scoremodel(fit,x,y)
print("Full Regression: AUC=%.4f" % fitscore)
kf = KFold(n_splits=args.kfolds)
scores = []
for train,test in kf.split(x):
xtrain = x[train]
ytrain = y[train]
xtest = x[test]
ytest = y[test]
unfit.fit(xtrain,ytrain)
scores.append(scoremodel(unfit, xtest, ytest))
print(f"{args.kfolds}-fold CV: AUC=%.4f (std %.4f)" % (np.mean(scores), np.std(scores)))
print("Gap: %.4f" % (fitscore-np.mean(scores)))
if args.outfile:
pickle.dump(fit, args.outfile, pickle.HIGHEST_PROTOCOL)