-
Notifications
You must be signed in to change notification settings - Fork 6
/
sentence_retrieval.py
48 lines (42 loc) · 1.52 KB
/
sentence_retrieval.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
import os
import json
import jsonlines
import nltk
import codecs
import utilities
def getRelevantSentences(relevant_docs, entities, wiki_split_docs_dir):
    """Collect every non-empty sentence from the given wiki documents.

    Parameters
    ----------
    relevant_docs : iterable of str
        Document identifiers; each maps to ``<wiki_split_docs_dir>/<id>.json``.
    entities : iterable of str
        Currently unused (entity-filtering code was disabled upstream);
        kept in the signature for backward compatibility with callers.
    wiki_split_docs_dir : str
        Directory containing the per-document JSON files. Each file has a
        ``"lines"`` list of dicts with a ``"content"`` string.

    Returns
    -------
    list of dict
        One dict per non-empty sentence:
        ``{'id': doc_id, 'line_num': index, 'sentence': text}``,
        where ``line_num`` is the sentence's index within the document.
    """
    relevant_sentences = []
    for relevant_doc in relevant_docs:
        # Use a context manager so the handle is always closed
        # (the original opened the file and never closed it).
        with codecs.open(wiki_split_docs_dir + "/" + relevant_doc + ".json", "r", "utf-8") as doc_file:
            doc = json.load(doc_file)
        lines = [entry['content'] for entry in doc["lines"]]
        for i, line in enumerate(lines):
            # Normalize PTB-style bracket tokens back to parentheses.
            line = line.strip()
            line = line.replace("-LRB-", " ( ")
            line = line.replace("-RRB-", " ) ")
            if line == "":
                # Blank sentence slots are skipped but keep their index,
                # so line_num still matches the document's numbering.
                continue
            relevant_sentences.append({
                'id': relevant_doc,
                'line_num': i,
                'sentence': line,
            })
    return relevant_sentences
def getSentence(wiki_doc_dir, doc_filename, sentence_id):
    """Return the text of one sentence from a plain-text wiki document.

    Each line of ``<wiki_doc_dir>/<doc_filename>.txt`` holds tab-separated
    fields: the sentence id, the sentence text, then named entities.
    The sentence id is assumed to equal the line's position in the file.

    Parameters
    ----------
    wiki_doc_dir : str
        Directory containing the ``.txt`` wiki documents.
    doc_filename : str
        Document name without the ``.txt`` extension.
    sentence_id : int
        Zero-based line index of the wanted sentence.

    Returns
    -------
    str
        The sentence text (second tab-separated field of that line).

    Raises
    ------
    IndexError
        If ``sentence_id`` is out of range or the line lacks a text field.
    """
    # BUG FIX: the original subscripted the file object (doc["lines"]),
    # which raises TypeError on every call — read the file contents instead.
    # The context manager also closes the handle, which the original leaked.
    with codecs.open(wiki_doc_dir + "/" + doc_filename + ".txt", "r", "utf-8") as doc:
        doc_splitted_lines = doc.read().split("\n")
    return doc_splitted_lines[sentence_id].split("\t")[1]