#!/usr/bin/env python3
"""
Experiments to run for hyperparameter search - CAN baseline

Usage:
    ./hyperparameter_tuning_experiments_can.py | tee hyperparameter_tuning_experiments_list_can.py
"""
import random
import collections

from print_dictionary import print_dictionary


def generate_hyperparameter_list(max_experiments=None, seed=None):
    base_lr_options = [0.0001, 0.001]
    source_batch_size_options = [30, 60]
    alpha_options = [0.0001, 0.0005, 0.001]
    beta_options = [0.75, 1, 1.5, 2, 2.25, 2.5, 2.75, 3]
    loss_weight_options = [0.1, 0.2, 0.3, 0.4, 0.5]

    experiments = []

    # All combinations of the above
    for base_lr in base_lr_options:
        for source_batch_size in source_batch_size_options:
            for alpha in alpha_options:
                for beta in beta_options:
                    for loss_weight in loss_weight_options:
                        experiments.append([
                            base_lr,
                            source_batch_size,
                            alpha,
                            beta,
                            loss_weight,
                        ])
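
    # The full grid is 2 * 2 * 3 * 8 * 5 = 480 combinations, far more than we
    # want to run, so the list is subsampled below via max_experiments.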

    # Limit the number of experiments (if there are more than requested)
    if max_experiments is not None and len(experiments) > max_experiments:
        if seed is not None:
            random.Random(seed).shuffle(experiments)
        else:
            random.shuffle(experiments)
        experiments = experiments[:max_experiments]
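    # Note: random.Random(seed) uses a private RNG instance, so with a fixed
    # seed the same subset is selected on every run without disturbing the
    # global random state.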

    def format_f(f):
        # Trim trailing zeros from fixed-point formatting. Fails for values
        # needing more than six decimal places, but works for our numbers.
        return "{:f}".format(f).rstrip("0").rstrip(".")

    # Output list of hyperparameters for this method
    runlist = []

    for the_tuple in experiments:
        (
            base_lr,
            source_batch_size,
            alpha,
            beta,
            loss_weight,
        ) = the_tuple

        # Folder for the logs/models
        folder = "lr{lr}_sb{sb}_a{a}_b{b}_w{w}".format(
            lr=format_f(base_lr),
            sb=source_batch_size,
            a=format_f(alpha),
            b=format_f(beta),
            w=format_f(loss_weight),
        )

        # Args that set the hyperparameters
        options = "--base_lr {lr} " \
            "--train_source_batch_size {sb} " \
            "--inv_alpha {a} " \
            "--inv_beta {b} " \
            "--loss_weight {w}".format(
                lr=format_f(base_lr),
                sb=source_batch_size,
                a=format_f(alpha),
                b=format_f(beta),
                w=format_f(loss_weight),
            )

        runlist.append((folder, options, the_tuple))

    return runlist
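
# A sketch of one returned entry (values illustrative, not the actual seeded
# draw): generate_hyperparameter_list(max_experiments=10, seed=42) yields
# (folder, options, hyperparams) triples such as
#   ("lr0.001_sb30_a0.0005_b1.5_w0.2",
#    "--base_lr 0.001 --train_source_batch_size 30 --inv_alpha 0.0005 "
#    "--inv_beta 1.5 --loss_weight 0.2",
#    [0.001, 30, 0.0005, 1.5, 0.2])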

if __name__ == "__main__":
    seed = 42
    max_experiments = 10

    # Should probably match the datasets in hyperparameter_tuning_analysis.py
    datasets = [
        "ucihar", "ucihhar", "wisdm_ar", "wisdm_at",
        "myo", "ninapro_db5_like_myo_noshift",
        "normal_n12_l3_inter2_intra1_5,0,0,0_sine",
        "normal_n12_l3_inter2_intra1_0,0.5,0,0_sine",
        "normal_n12_l3_inter1_intra2_0,0,5,0_sine",
        "normal_n12_l3_inter1_intra2_0,0,0,0.5_sine",
    ]
    methods = ["can"]

    hyperparam_list = generate_hyperparameter_list(
        max_experiments=max_experiments,
        seed=seed,
    )

    # Maps [dataset][method] -> list of (folder, options, hyperparams) entries
    hyperparameter_tuning_experiments_list = collections.defaultdict(
        lambda: collections.defaultdict(str)
    )

    for dataset in datasets:
        for method in methods:
            if method == "can":
                hyperparameter_tuning_experiments_list[dataset][method] = hyperparam_list
            else:
                raise NotImplementedError(method + " experiments not implemented")

    print("# Generated by hyperparameter_tuning_experiments_can.py. Changes will be overwritten.")
    print_dictionary(hyperparameter_tuning_experiments_list,
                     "hyperparameter_tuning_experiments_list_can")