helper.py
import array
from collections import defaultdict

import numpy as np
import tensorflow as tf


def load_vocab(filename):
    """Load a newline-separated vocabulary file.

    Returns the vocabulary as a list of words and a word -> index mapping.
    """
    with open(filename) as f:
        vocab = f.read().splitlines()
    dct = defaultdict(int)
    for idx, word in enumerate(vocab):
        dct[word] = idx
    return [vocab, dct]


def load_glove_vectors(filename, vocab):
    """
    Load GloVe vectors from a .txt file.
    Optionally limit the vocabulary to save memory. `vocab` should be a set.
    """
    dct = {}
    vectors = array.array('d')
    current_idx = 0
    word_dim = 0
    with open(filename, "r") as f:
        # Skip the header line of the vectors file.
        f.readline()
        for line in f:
            tokens = line.split(" ")
            word = tokens[0]
            entries = tokens[1:]
            if not vocab or word in vocab:
                dct[word] = current_idx
                vectors.extend(float(x) for x in entries)
                current_idx += 1
            word_dim = len(entries)
    num_vectors = len(dct)
    tf.logging.info("Found {} out of {} vectors in {}".format(
        num_vectors, len(vocab) if vocab else 0, filename))
    return [np.array(vectors).reshape(num_vectors, word_dim), dct]
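
# Illustrative sketch of the pair returned by load_glove_vectors: the matrix holds one row
# per retained word, and the dict maps each word to its row index. The file path and probe
# word below are placeholders (assumptions for illustration, not data shipped with this repo).
#
# glove_vectors, glove_dict = load_glove_vectors('data/glove_vectors.txt', set(vocab))
# if 'hello' in glove_dict:
#     hello_vector = glove_vectors[glove_dict['hello']]  # shape: (word_dim,)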


def build_initial_embedding_matrix(vocab_dict, glove_dict, glove_vectors, embedding_dim, random_seed):
    """Initialize all vocabulary embeddings randomly, then overwrite the rows covered by GloVe."""
    np.random.seed(random_seed)
    initial_embeddings = np.random.normal(
        0.0, 0.1, size=(len(vocab_dict), embedding_dim)).astype("float32")
    for word, glove_word_idx in glove_dict.items():
        word_idx = vocab_dict.get(word)
        if word_idx is not None:
            initial_embeddings[word_idx, :] = glove_vectors[glove_word_idx]
    return initial_embeddings


# Example usage (the seed value is arbitrary; build_initial_embedding_matrix requires one):
# vocab, vocab_dict = load_vocab('data/vocabulary.txt')
# glove_vectors, glove_dict = load_glove_vectors('data/test.txt', set(vocab))
# initial_matrix = build_initial_embedding_matrix(vocab_dict, glove_dict, glove_vectors, 300, random_seed=42)
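
# A rough, hedged sketch of how the matrix produced above might seed a TF1-style embedding
# layer. The variable name, the placeholder, and the assumption that embedding_dim matches
# the GloVe dimensionality are illustrative, not part of this module.
#
# word_embeddings = tf.get_variable(
#     "word_embeddings",
#     initializer=tf.constant(initial_matrix))
# token_ids = tf.placeholder(tf.int64, shape=[None, None])  # [batch, sequence_length]
# embedded_tokens = tf.nn.embedding_lookup(word_embeddings, token_ids)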