import os
import h5py
import string
import zipfile
import pandas as pd
import numpy as np
from nltk import word_tokenize
from multiprocessing import Pool
from keras.utils import to_categorical, Sequence
# Defaults for parallelize_dataframe: number of chunks to split a dataframe into
# and number of worker processes to run.
num_partitions = 10
num_cores = 4
class ListDataGenerator(Sequence):
    'Generates listwise batches (one query together with all its candidate passages) for Keras'

    def __init__(self, train, max_len_encoding_passage, max_len_encoding_query,
                 batch_size=128, shuffle=True, debug=False):
        'Initialization'
        # Reshape the flat dataframe so each row groups all passages belonging to one query.
        self.unique_ques = len(np.unique(train.query_id.values))
        self.query_id = train.query_id.values.reshape((self.unique_ques, -1))
        self.query = train["query"].values.reshape((self.unique_ques, -1))[:, 0]
        self.passage = train["passage_text"].values.reshape((self.unique_ques, -1))
        self.label = train.label.values.reshape((self.unique_ques, -1))
        self.query_len = np.vstack(train.query_mask.values).reshape((self.unique_ques, -1, max_len_encoding_query))[:, 0]
        self.passage_len = np.vstack(train.passage_mask.values).reshape((self.unique_ques, -1, max_len_encoding_passage))
        self.shuffle = shuffle
        self.batch_size = batch_size
        self.debug = debug
        assert self.query_id.shape == self.label.shape
        self.on_epoch_end()

    def set_batch_size(self, batch_size):
        'Changes the batch size and regenerates the batch indexes'
        self.batch_size = batch_size
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(self.unique_ques / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        batch_indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        # Generate data
        if self.debug:
            X, y, query_ids = self.__data_generation(batch_indexes)
            return X, y, query_ids
        else:
            X, y = self.__data_generation(batch_indexes)
            return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        print("Generating new batches")
        self.indexes = np.arange(len(self.query_id))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __data_generation(self, batch_indexes):
        'Generates data containing batch_size samples'
        X = [self.query[batch_indexes],
             self.passage[batch_indexes],
             self.query_len[batch_indexes],
             self.passage_len[batch_indexes]]
        y = self.label[batch_indexes]
        if self.debug:
            return X, y, self.query_id[batch_indexes]
        return X, y
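
# A minimal usage sketch for ListDataGenerator (train_df, MAX_PASSAGE_LEN and
# MAX_QUERY_LEN are illustrative names only; the dataframe is assumed to carry
# the query_id, query, passage_text, label, query_mask and passage_mask columns
# this module expects):
#
#   train_gen = ListDataGenerator(train_df, MAX_PASSAGE_LEN, MAX_QUERY_LEN,
#                                 batch_size=64, shuffle=True)
#   model.fit_generator(train_gen, epochs=5)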
def parallelize_dataframe(df, func):
    'Splits a dataframe into num_partitions chunks and applies func to each chunk in parallel'
    df_split = np.array_split(df, num_partitions)
    pool = Pool(num_cores)
    df = pd.concat(pool.map(func, df_split))
    pool.close()
    pool.join()
    return df
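
# A minimal sketch of how parallelize_dataframe might be used (process_chunk is
# a hypothetical per-chunk function, shown here tokenizing passages to 100 tokens):
#
#   def process_chunk(chunk):
#       chunk["passage_tokens"] = chunk["passage_text"].apply(lambda t: tokenize(t, 100)[0])
#       return chunk
#
#   train_df = parallelize_dataframe(train_df, process_chunk)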
# Tokens made up purely of punctuation are dropped before padding.
stopset = list(string.punctuation)

def tokenize(text, max_len):
    'Lowercases and tokenizes text, pads/truncates to max_len tokens, and returns the text plus its mask'
    new_seq = []
    mask = np.zeros(max_len, dtype="int8")
    tokens = word_tokenize(text.lower())
    tokens = [token for token in tokens if token not in stopset]
    for i in range(max_len):
        try:
            new_seq.append(tokens[i])
            mask[i] = 1
        except IndexError:
            new_seq.append("__PAD__")
    return " ".join(new_seq), mask
def specific_save_epoch(model, path):
    'Saves the model weights as individual datasets in an HDF5 file'
    filename = '%s.h5' % (path)
    h5_file = h5py.File(filename, 'w')
    weight = model.get_weights()
    for i in range(len(weight)):
        h5_file.create_dataset('weight' + str(i), data=weight[i])
    h5_file.close()
def specific_load_epoch(model, path):
    'Loads weights saved by specific_save_epoch back into the model'
    filename = '%s.h5' % (path)
    h5_file = h5py.File(filename, 'r')
    weight = []
    for i in range(len(h5_file.keys())):
        weight.append(h5_file['weight' + str(i)][:])
    h5_file.close()
    model.set_weights(weight)
def mean_reciprocal_rank(preds, labels, cases=10):
    'Computes MRR over flat predictions, assuming each query has `cases` consecutive candidate passages'
    total_queries = int(len(labels) / cases)
    preds_ = preds.reshape((total_queries, cases))
    labels_ = labels.reshape((total_queries, cases))
    # Order each query's candidates by descending score.
    sorted_indices = (-preds_).argsort()
    rel = np.zeros((total_queries, cases))
    for i, index in enumerate(sorted_indices):
        rel[i, :] = labels_[i, :][index]
    # Reciprocal rank of the first relevant passage, averaged over queries.
    return np.mean(1.0 / (np.argmax(rel, axis=1) + 1))
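
# A small made-up example with two queries and 10 candidates each (cases=10):
#
#   preds = np.array([0.1, 0.9, 0.2, 0.3, 0.05, 0.4, 0.6, 0.2, 0.1, 0.0,
#                     0.8, 0.1, 0.2, 0.3, 0.05, 0.4, 0.6, 0.2, 0.1, 0.0])
#   labels = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
#                      0, 0, 0, 0, 0, 0, 1, 0, 0, 0])
#   mean_reciprocal_rank(preds, labels, cases=10)   # -> (1/1 + 1/2) / 2 = 0.75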
def prepare_submission(df, submission_name, base_path = ""):
subm = (df.groupby("query_id")["score"]
.apply(list).reset_index())
subm[list(map(str,range(10)))] = subm["score"].apply(pd.Series)
subm[["query_id"] + list(map(str,range(10))) ].to_csv("answer.tsv",
sep="\t",
index=False,
header=False)
zipfile.ZipFile(os.path.join(base_path ,submission_name +'.zip'), mode='w').write("answer.tsv")
os.remove("answer.tsv")