-
Notifications
You must be signed in to change notification settings - Fork 0
/
sumy_test.py
60 lines (37 loc) · 1.64 KB
/
sumy_test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
import json
# Language used for tokenization, stemming, and stop-word filtering.
LANGUAGE = "english"
# Number of sentences the LSA summarizer should return.
SENTENCES_COUNT = 2
# Example article URLs tried with this script:
# https://www.cnn.com/2020/11/07/health/us-coronavirus-saturday/index.html
# https://www.nytimes.com/2020/11/06/opinion/sunday/joe-biden-president-policy.html
# https://theintercept.com/2020/11/06/fox-news-election-trump-murdoch/
# https://www.foxnews.com/politics/pa-battlegrounds-biden-trump-legal-fight
if __name__ == "__main__":
    # Fetch a news article, produce an LSA-based extractive summary of
    # SENTENCES_COUNT sentences, print it, and save it to file.json.
    url = "https://www.nytimes.com/2020/11/06/opinion/sunday/joe-biden-president-policy.html"
    parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE))
    # or for plain text files
    # parser = PlaintextParser.from_file("document.txt", Tokenizer(LANGUAGE))
    stemmer = Stemmer(LANGUAGE)
    summarizer = Summarizer(stemmer)
    summarizer.stop_words = get_stop_words(LANGUAGE)

    # Fix: the original ran the summarizer twice (once into a discarded
    # variable, once in the loop), doing the expensive LSA pass twice.
    # Run it once and keep the sentences as plain strings.
    sentence_list = [str(sentence)
                     for sentence in summarizer(parser.document, SENTENCES_COUNT)]

    # Fix: guard against documents that yield fewer sentences than
    # requested — the original raised IndexError on sentence_list[1].
    x = {
        "title": "article title",
        "first": sentence_list[0] if len(sentence_list) > 0 else "",
        "second": sentence_list[1] if len(sentence_list) > 1 else "",
    }
    # The original printed json.loads(json.dumps(x)); for a dict of plain
    # strings that round-trip is an identity, so print the dict directly.
    print(x)
    # Explicit encoding so the output file is UTF-8 on every platform.
    with open('file.json', 'w', encoding='utf-8') as outfile:
        json.dump(x, outfile)