###################################
#          configuration          #
###################################
[DEFAULT]
task_name=semeval
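# Task identifier; presumably selects the SemEval-2010 Task 8 relation data/processor.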
output_dir=./output/semeval
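# Directory where checkpoints and evaluation outputs are written.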
use_entity_indicator=True
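# Presumably wraps the two entity mentions with indicator/marker tokens in the input.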
[MODEL]
seed=12345
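# Random seed for reproducible initialization.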
#pretrained_model_name=bert-base-uncased
pretrained_model_name=bert-large-uncased
#pretrained_model_name=./large-uncased-model
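# Hugging Face model name or a local path; the commented lines are alternatives.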
[Train]
num_train_epochs=5.0
# Total number of training epochs to perform.
learning_rate=3e-5
# The initial learning rate for Adam.
per_gpu_train_batch_size=16
# Batch size per GPU/CPU for training.
per_gpu_eval_batch_size=8
# Batch size per GPU/CPU for evaluation.
no_cuda=False
# Avoid using CUDA even when it is available.
[Dataset]
data_dir=./data
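# Directory containing the training and evaluation data files.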
max_seq_len=128
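# Maximum input sequence length after tokenization.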
train=True
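# Whether to run training.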
eval=True
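# Whether to run evaluation.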
evaluate_during_training=True
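# Run evaluation during training at each logging step.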
gradient_accumulation_steps=1
# Number of update steps to accumulate before performing a backward/update pass.
weight_decay=1e-3
# Weight decay to apply, if any.
adam_epsilon=1e-8
# Epsilon for Adam optimizer.
max_grad_norm=1.0
# Max gradient norm.
max_steps=-1
# If > 0: total number of training steps to perform; overrides num_train_epochs.
warmup_steps=0
# Linear warmup over warmup_steps.
logging_steps=200
# Log every X update steps.
save_steps=200
# Save a checkpoint every X update steps.
eval_all_checkpoints=False
# Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number.
#l2_reg_lambda=5e-3
l2_reg_lambda=0
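# L2 regularization coefficient; 0 disables it (the commented value above is an alternative).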
overwrite_output_dir=True
# Overwrite the content of the output directory.
overwrite_cache=True
# Overwrite the cached training and evaluation sets.
local_rank=-1
# Local rank for distributed training; -1 means no distributed training.
latent_entity_typing=False
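# Presumably toggles an auxiliary latent entity typing component; not documented in the original file.

# Usage sketch (an assumption, not part of the original config): a file like this
# can be read with Python's standard configparser; keys under [DEFAULT] are
# inherited by every other section.
#   import configparser
#   cfg = configparser.ConfigParser()
#   cfg.read("config.ini")
#   lr = cfg.getfloat("Train", "learning_rate")         # 3e-05
#   epochs = cfg.getfloat("Train", "num_train_epochs")  # 5.0
#   task = cfg.get("Train", "task_name")                # "semeval", via [DEFAULT]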