default_multi.cfg
[BASIC]
# the directory of input ontologies; mandatory
ontology_dir = ~/Exp5_Conference/ontologies/
# the output directory for the embeddings
# default: $cache_dir/output
# embedding_dir = output_embedding
embedding_dir = ~/Exp5_Conference/owl2vec/d2_r4_nr_p
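
The [BASIC] paths use "~", which Python's configparser does not expand on its own. A minimal sketch of reading this section, assuming the file sits in the working directory (only the file name and the section/key names come from this config; the rest is illustrative):

import configparser
import os

config = configparser.ConfigParser()
config.read("default_multi.cfg")

# configparser returns raw strings, so "~" must be expanded manually
ontology_dir = os.path.expanduser(config["BASIC"]["ontology_dir"])
embedding_dir = os.path.expanduser(config["BASIC"]["embedding_dir"])
print(ontology_dir, embedding_dir)
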
[DOCUMENT]
# cache directory for storing files
# default: ./cache/
cache_dir = ~/Exp5_Conference/owl2vec/
# whether or not to use the projected ontology
# default: no
ontology_projection = yes
# the reasoner for extracting axioms (hermit, elk, or none)
# axiom_reasoner = hermit
# axiom_reasoner = elk
axiom_reasoner = none
# walker and walk_depth must be set
# random walk or random walk with Weisfeiler-Lehman (wl) subtree kernel
walker = random
# walker = wl
walk_depth = 4
# whether to generate the URI/literal/mixture documents (yes or no)
# these can be overwritten by the command-line parameters
URI_Doc = yes
Lit_Doc = yes
Mix_Doc = no
# the type for generating the mixture document (all or random)
# works when Mix_Doc is set to yes
# Mix_Type = all
Mix_Type = random
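
To make walker and walk_depth concrete, here is a minimal sketch of a depth-bounded random walk over a toy adjacency map; the graph and helper function are hypothetical stand-ins for the walks extracted from the projected ontology, not the tool's actual code:

import random

def random_walk(graph, start, depth):
    """Walk up to `depth` steps from `start`, returning the visited nodes."""
    walk = [start]
    node = start
    for _ in range(depth):
        neighbours = graph.get(node, [])
        if not neighbours:
            break
        node = random.choice(neighbours)
        walk.append(node)
    return walk

graph = {"ex:Paper": ["ex:Conference"], "ex:Conference": ["ex:Event"]}
print(random_walk(graph, "ex:Paper", depth=4))

With walk_depth = 4, each walk visits at most five entities (the start plus four steps); the wl walker additionally relabels nodes with the Weisfeiler-Lehman subtree kernel.
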
[MODEL]
# the directory of the pre-trained word2vec model
# default: without pre-training
# comment it if no pre-training is needed
# pre_train_model = ~/w2v_model/enwiki_model/word2vec_gensim
# Available from https://tinyurl.com/word2vec-model
# the embedding size
# if a pre-trained model is adopted, its size is used instead
embed_size = 100
# number of iterations in training the word2vec model
# i.e., epochs in gensim 4.x.x
iteration = 10
# parameters for training the word2vec model without pre-training
window = 5
min_count = 1
negative = 25
seed = 42
# number of epochs for fine-tuning the pre-trained model
epoch = 100
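
For the from-scratch case (no pre_train_model), the [MODEL] parameters map onto gensim 4.x's Word2Vec arguments roughly as follows; the one-sentence corpus is a placeholder, and this is a sketch rather than the tool's actual training code:

from gensim.models import Word2Vec

sentences = [["ex:Paper", "ex:hasAuthor", "ex:Person"]]  # placeholder walk corpus

model = Word2Vec(
    sentences,
    vector_size=100,  # embed_size
    window=5,         # window
    min_count=1,      # min_count
    negative=25,      # negative
    seed=42,          # seed
    epochs=10,        # iteration (called epochs in gensim 4.x)
)
model.save("word2vec_gensim")

When a pre-trained model is adopted instead, one would load it (e.g. with Word2Vec.load) and continue training, with the epoch setting above governing the fine-tuning passes.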