Word2vec_prepare.py

import json

import Survey
import postProcess
import fio
import NLTKWrapper

phraseext = ".key"           # a list of phrases, one per line
studentext = ".keys.source"  # json
countext = ".dict"           # a dictionary
lpext = ".lp"
lpsolext = ".sol"
ngramext = ".ngram.json"


def getNgram(prefix):
    # extract the n-grams (unigrams and bigrams) from the phrase file
    data = {}

    phrasefile = prefix + phraseext
    lines = fio.ReadFile(phrasefile)
    phrases = [line.strip() for line in lines]

    # collect unigrams (n=1) and bigrams (n=2), deduplicated
    for n in [1, 2]:
        ngrams = []
        for phrase in phrases:
            grams = NLTKWrapper.getNgram(phrase, n)
            ngrams = ngrams + grams

        ngrams = list(set(ngrams))
        data[n] = ngrams

    with open(prefix + ngramext, 'w') as outfile:
        json.dump(data, outfile, indent=2)
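
# Illustrative note (not in the original source): json.dump serializes the
# integer keys as strings, so <prefix>.ngram.json ends up shaped like
#   {"1": ["<unigram>", ...], "2": ["<bigram>", ...]}
# where the values are whatever NLTKWrapper.getNgram returns for each phrase.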


def ExtractNgram(outdir, np):
    # extract n-grams for every week (1-12) and prompt type (POI, MP, LP)
    sheets = range(0, 12)

    for i, sheet in enumerate(sheets):
        week = i + 1
        dir = outdir + str(week) + '/'

        for type in ['POI', 'MP', 'LP']:
            prefix = dir + type + "." + np
            getNgram(prefix)
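
# Layout sketch (inferred from the loops above; not documented in the original):
#   <outdir>/<week>/<type>.<np>.key         phrase file read by getNgram
#   <outdir>/<week>/<type>.<np>.ngram.json  n-gram file written by getNgram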


if __name__ == '__main__':
    excelfile = "../../data/2011Spring_norm.xls"
    sennadatadir = "../../data/senna/"
    outdir = "../../data/wordvector/"

    # Step 1: get senna input
    # Survey.getStudentResponses4Senna(excelfile, sennadatadir)

    # Step 2: get senna output

    # Step 3: get phrases
    # for np in ['syntax', 'chunk']:
    # for np in ['syntax']:
    #     postProcess.ExtractNPFromRaw(excelfile, sennadatadir, outdir, method=np)
    #     postProcess.ExtractNPSource(excelfile, sennadatadir, outdir, method=np)
    #     postProcess.ExtractNPFromRawWithCount(excelfile, sennadatadir, outdir, method=np)
    #
    # # Step 4: write TA's reference
    # Survey.WriteTASummary(excelfile, outdir)

    # Step 5: extract n-grams from the extracted phrases
    for np in ['syntax']:
        ExtractNgram(outdir, np)

    print "done"