char_tabanlı_count_1gram.py
import pandas as pd
import xgboost
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import linear_model, naive_bayes, metrics, svm, ensemble
"""**BU PROGRAMDA KARAKTER TABANLI 1-GRAM COUNT VECTORIZER ICIN KLASİK YÖNTEMLERİN BAŞARI SONUÇLARI ELDE EDİLMİŞTİR**"""

# Load the preprocessed tweet datasets
train = pd.read_excel("clean_tweet_train.xlsx")
test = pd.read_excel("clean_tweet_test.xlsx")
train.head()

# Combined corpus, used only to fit the vectorizer vocabulary
# (DataFrame.append was removed in pandas 2.0, so pd.concat is used instead)
Train = pd.concat([train, test], ignore_index=True).fillna(' ')

# Drop rows with missing values and re-index both splits
train.dropna(inplace=True)
train.reset_index(drop=True, inplace=True)
train.info()

test.dropna(inplace=True)
test.reset_index(drop=True, inplace=True)
test.info()

# Texts and sentiment labels as plain Python lists
x_train = train.text.tolist()
y_train = train.sentiment.tolist()
x_test = test.text.tolist()
y_test = test.sentiment.tolist()

# Character-based 1-gram count vectorizer, fitted on the combined corpus
count = CountVectorizer(analyzer='char', ngram_range=(1, 1))
count.fit(Train['text'])
xtrain_count = count.transform(x_train)
xtest_count = count.transform(x_test)
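
# Optional sanity check (an added sketch, not in the original script): the number of
# columns equals the number of distinct characters seen in the combined corpus,
# and both matrices share that column count.
print("character vocabulary size:", len(count.vocabulary_))
print("train matrix shape:", xtrain_count.shape)
print("test matrix shape:", xtest_count.shape)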

def model_training(classifier, vector_train, y_train, vector_test, y_test):
    """Fit the classifier on the training vectors and return its accuracy on the test set."""
    classifier.fit(vector_train, y_train)
    predictions = classifier.predict(vector_test)
    return metrics.accuracy_score(y_test, predictions)

# Naive Bayes
accuracy = model_training(naive_bayes.MultinomialNB(), xtrain_count, y_train, xtest_count, y_test)
print("NB, character-based count-vectorizer accuracy (%):", accuracy * 100)

# Logistic Regression
accuracy = model_training(linear_model.LogisticRegression(solver='newton-cg', multi_class='multinomial'),
                          xtrain_count, y_train, xtest_count, y_test)
print("LR, character-based count-vectorizer accuracy (%):", accuracy * 100)

# SVM
accuracy = model_training(svm.SVC(kernel='linear'), xtrain_count, y_train, xtest_count, y_test)
print("SVM, character-based count-vectorizer accuracy (%):", accuracy * 100)

# Random Forest
accuracy = model_training(ensemble.RandomForestClassifier(n_estimators=100), xtrain_count, y_train, xtest_count, y_test)
print("RF, character-based count-vectorizer accuracy (%):", accuracy * 100)

# Extreme Gradient Boosting (sparse CSC input for the linear booster)
accuracy = model_training(xgboost.XGBClassifier(booster='gblinear'), xtrain_count.tocsc(), y_train, xtest_count.tocsc(), y_test)
print("XGB, character-based count-vectorizer accuracy (%):", accuracy * 100)