UsefulFunctions.py
# All imports here
import re

import nltk
import pandas as pd
import numpy as np
from nltk.corpus import stopwords            # requires the NLTK 'stopwords' corpus (nltk.download('stopwords'))
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer

import SpellCorrection                       # local spell-correction module

# Shared TF-IDF vectorizer: fitted once on the question corpus, then reused
# to transform new queries into the same feature space.
vect = TfidfVectorizer()
# Split a two-column DataFrame into question and response lists
def columnstoList(data):
    Ques = list(data.iloc[:, 0])
    Resp = list(data.iloc[:, 1])
    return Ques, Resp
# Clean, spell-correct, stop-word-filter and stem every string in sample_list
def tokenization_spellcheck(sample_list):
    corpus = []
    ps = PorterStemmer()
    stop_words = set(stopwords.words('english'))
    for text in sample_list:
        # Keep only letters and digits, then lowercase and split on whitespace
        review = re.sub('[^a-zA-Z0-9]', ' ', text)
        review = review.lower()
        review = review.split()
        # Spell correction, one token at a time
        review = [SpellCorrection.correction(word) for word in review]
        # Stemming and stop-word removal
        review = [ps.stem(word) for word in review if word not in stop_words]
        corpus.append(' '.join(review))
    return corpus
# Fit the shared vectorizer on the question corpus and return the dense TF-IDF matrix
def createTfidfVectorizer(queslist):
    qmatrix = vect.fit_transform(queslist).toarray()
    return qmatrix

# Transform new text with the already-fitted vectorizer (call createTfidfVectorizer first)
def createTfidfVectorizer_Instance(queslist):
    rmatrix = vect.transform(queslist).toarray()
    return rmatrix