train_arguments.py
# General training settings.
TRAIN_SCENARIO = "standard"
USE_LOGGER = True
USE_LOGGER_VERSIONING = False
SAVE_CHECKPOINT = True
CONTINUE_TRAIN = False  # resume training from an existing checkpoint
LOG_EVERY_N_STEPS = 1
EVAL_OOD = True  # additionally evaluate on out-of-distribution data
LOSS_FCT = "MSE"  # mean squared error
BATCH_SIZE = 64
NUM_WORKERS = 8  # number of dataloader worker processes
MAX_EPOCHS = 1000
ACCELERATOR = "gpu"
TRAINER_CONFIG = dict(
    accelerator=ACCELERATOR,
    devices=1,  # for benchmarking in research papers, use a single device only
    max_epochs=MAX_EPOCHS,
    enable_checkpointing=SAVE_CHECKPOINT,
    # precision=16,
)
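# Usage sketch (an assumption, not shown in this file): the keys above match
# pytorch_lightning.Trainer keyword arguments, so the config would be
# consumed roughly as
#
#     import pytorch_lightning as pl
#     trainer = pl.Trainer(**TRAINER_CONFIG)
#     trainer.fit(model, train_loader, val_loader)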
PLOT_FORECAST = True
# Hyperparameter grids per model: each entry maps a hyperparameter name to a
# list of candidate values (presumably searched over during training runs).
HPARAMS = dict(
    MLP=dict(
        d_hidden_layers=[[256, 512, 256]],
        batch_norm=[False],
    ),
    TCN=dict(
        kernel_size=[9],
        num_channels=[(64, 128, 64)],
        dropout=[0],
    ),
    GRU=dict(
        d_hidden=[256],
        n_layers=[1],
        autoregressive=[False, True],
    ),
    TcnAe=dict(
        latent_dim=[16],
        enc_tcn1_in_dims=[(3, 50, 40, 30)],
        enc_tcn1_out_dims=[(50, 40, 30, 10)],
        enc_tcn2_in_dims=[(10, 8, 6, 3)],
        enc_tcn2_out_dims=[(8, 6, 3, 1)],
        kernel_size=[15],
    ),
    Transformer=dict(
        d_model=[16],
        d_ff=[256],
        n_layers_enc=[4],
        n_layers_dec=[4],
        n_heads=[4],
        dropout=[0],
        norm_first=[False],
        conv_embed=[False, True],
        conv_kernel_size=[3],
    ),
)
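
# A minimal, self-contained sketch of expanding the HPARAMS grids into
# individual configurations. Assumption: each list above holds candidate
# values for a grid search; the training loop that consumes these
# combinations lives elsewhere and is not part of this file.
if __name__ == "__main__":
    from itertools import product

    for model_name, grid in HPARAMS.items():
        keys = list(grid)
        # Cartesian product over every hyperparameter's candidate values.
        for values in product(*(grid[key] for key in keys)):
            print(model_name, dict(zip(keys, values)))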