embedding.py
import json

import numpy as np
from gensim.models import Word2Vec
from keras.layers import Embedding


# create embeddings with gensim and persist both the trained model
# and the word -> index mapping to disk
def create_embeddings(sentences,
                      embeddings_path='temp_embeddings/embeddings.gensimmodel',
                      vocab_path='temp_embeddings/mapping.json',
                      **params):
    model = Word2Vec(sentences, **params)
    model.save(embeddings_path)
    # model.wv.key_to_index maps each word to its row in the weight
    # matrix (gensim >= 4; older releases exposed this as model.wv.vocab)
    vocab = dict(model.wv.key_to_index)
    with open(vocab_path, 'w') as f:
        f.write(json.dumps(vocab))
    return vocab, model


# load the vocabulary index back from the json file written above
def load_vocab(vocab_path='temp_embeddings/mapping.json'):
    with open(vocab_path, 'r') as f:
        data = json.loads(f.read())
    word2idx = data
    idx2word = dict([(v, k) for k, v in data.items()])
    return word2idx, idx2word


# build a Keras Embedding layer initialised with the trained word2vec
# weights; the path must match the one used in create_embeddings
def word2vec_embedding_layer(embeddings_path='temp_embeddings/embeddings.gensimmodel'):
    model = Word2Vec.load(embeddings_path)
    weights = model.wv.vectors
    # the weights kwarg is the Keras 2 way to set a layer's initial weights
    layer = Embedding(input_dim=weights.shape[0],
                      output_dim=weights.shape[1],
                      weights=[weights])
    return layer
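

# Hypothetical usage sketch, not part of the original file: the toy corpus
# and the vector_size/min_count values are assumptions for illustration
# (both are standard gensim >= 4 Word2Vec parameters).
if __name__ == '__main__':
    import os

    # the default paths above assume this directory exists
    os.makedirs('temp_embeddings', exist_ok=True)
    corpus = [['hello', 'world'], ['hello', 'keras'], ['hello', 'gensim']]
    vocab, model = create_embeddings(corpus, vector_size=8, min_count=1)
    word2idx, idx2word = load_vocab()
    layer = word2vec_embedding_layer()
    print('vocab size:', len(word2idx))
    print('embedding shape:', layer.input_dim, 'x', layer.output_dim)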