ml_static.py
import pickle

import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (accuracy_score, confusion_matrix, f1_score,
                             precision_recall_fscore_support)
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
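# == DATA LOADING ==
# final_data_static.csv: one row per sample, with PE-header features,
# a 'hash' identifier, and the binary 'legit' label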
df = pd.read_csv('final_data_static.csv')
X = df.drop(['hash', 'legit'], axis=1).values
y = df.drop(['hash', 'DllCharacteristics', 'DebugSize', 'MajorImageVersion', 'MinorImageVersion', 'IatRVA', 'ExportSize', 'ResourceSize', 'NumberOfSections'], axis=1).values
# drop the first column (presumably the CSV's saved row index), leaving
# only the feature values in X and the 'legit' label in y
X = X[:, 1:]
y = y[:, 1:]
# convert dtype to int
X = X.astype(int)
y = y.astype(int)
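# default 75/25 train/test split (no fixed random_state, so the split
# varies between runs)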
X_train, X_test, y_train, y_test = train_test_split(X, y)
# == PREPROCESSING ==
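# fit the scaler on the training split only, then apply the same
# transformation to both splits to avoid test-set leakage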
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# == TRAINING ==
forest = RandomForestClassifier(n_estimators=10, random_state=0)
forest.fit(X_train_scaled, y_train.ravel())
# save the model to disk
filename = 'static_classifier.sav'
with open(filename, 'wb') as model_file:
    pickle.dump(forest, model_file)
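# Sketch (assumption, not from the original script): the fitted scaler is
# needed again at inference time, so it could be pickled alongside the model;
# 'static_scaler.sav' and `samples` below are hypothetical names.
# with open('static_scaler.sav', 'wb') as scaler_file:
#     pickle.dump(scaler, scaler_file)
# Later, in a separate scoring script:
# model = pickle.load(open('static_classifier.sav', 'rb'))
# scaler = pickle.load(open('static_scaler.sav', 'rb'))
# print(model.predict(scaler.transform(samples)))  # samples: 2-D feature array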
# print("RandomForestClassifier accuracy: {:.2f}".format(forest.score(X_test_scaled, y_test)*100))
y_pred = forest.predict(X_test_scaled)
a = accuracy_score(y_test, y_pred)
# precision and recall are returned as per-class arrays (one value per class)
p, r, _, _ = precision_recall_fscore_support(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
print("RandomForestClassifier")
print(f"accuracy : {a}\nprecision : {p}\nrecall : {r}\nf1 score : {f1}\nflase positive (fall out) : {fp / (fp + tn)}\nfalse negative (miss rate) : {fn / (fn + tp)}")
print("---")
clf = KNeighborsClassifier(n_neighbors=10)
clf.fit(X_train_scaled, y_train.ravel())
# print("KNeighborsClassifier accuracy: {:.2f}".format(clf.score(X_test_scaled, y_test)*100))
y_pred = clf.predict(X_test_scaled)
a = accuracy_score(y_test, y_pred)
p, r, _, _ = precision_recall_fscore_support(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
print("KNeighborsClassifier")
print(f"accuracy : {a}\nprecision : {p}\nrecall : {r}\nf1 score : {f1}\nflase positive (fall out) : {fp / (fp + tn)}\nfalse negative (miss rate) : {fn / (fn + tp)}")
print('---')
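# tune logistic regression's inverse regularisation strength C with a
# 5-fold cross-validated grid search on the training split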
param_grid = {'C': [0.001, 0.01, 0.1, 1, 10]}
grid = GridSearchCV(LogisticRegression(max_iter = 1000), param_grid, cv=5)
grid.fit(X_train_scaled, y_train.ravel())
# print("Best cross-validation score : {:.2f}".format(grid.best_score_))
# print("Best parameters : ", grid.best_params_)
# print('---')
# evaluate the best estimator found by the grid search on the test set
y_pred = grid.predict(X_test_scaled)
a = accuracy_score(y_test, y_pred)
p, r, _, _ = precision_recall_fscore_support(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
print("LogisticRegression")
print(f"accuracy : {a}\nprecision : {p}\nrecall : {r}\nf1 score : {f1}\nflase positive (fall out) : {fp / (fp + tn)}\nfalse negative (miss rate) : {fn / (fn + tp)}")
# The project requires good accuracy, precision, recall, and F-score for both
# machine learning models (static and dynamic analysis), with low false
# positive and false negative rates.