#!/usr/bin/env python3
"""
Experiments to run for hyperparameter search
Usage:
./hyperparameter_tuning_experiments.py | tee hyperparameter_tuning_experiments_list.py
"""
import random
import collections

from print_dictionary import print_dictionary


def generate_hyperparameter_list(max_experiments=None, seed=None,
        baseline=False, dataset=None, method=None):
    lr_options = [0.00001, 0.0001, 0.001]  # one power of 10 bigger/smaller than current
    similarity_weight_options = [1.0, 10.0, 100.0]
    max_positives_options = [5, 10]
    neg_pos_ratio_options = [2, 4]  # how many negatives for each positive
    temperature_options = [0.01, 0.05, 0.1, 0.5]
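    # Full grid from the lists above: 3 learning rates x 3 similarity weights
    # x 2 positive counts x 2 negative/positive ratios x 4 temperatures
    # = 144 combinations before any subsampling below.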

    experiments = []

    if baseline:
        # Baselines only use learning rate
        for lr in lr_options:
            # Other values don't matter, so just set to 0 or 1
            experiments.append([lr, 0, 1, 1, 0])
    else:
        # All combinations of the above
        for lr in lr_options:
            for similarity_weight in similarity_weight_options:
                for max_positives in max_positives_options:
                    for neg_pos_ratio in neg_pos_ratio_options:
                        max_negatives = neg_pos_ratio * max_positives

                        for temperature in temperature_options:
                            experiments.append([
                                lr,
                                similarity_weight,
                                max_positives,
                                max_negatives,
                                temperature,
                            ])

    # Limit the number of experiments (if there are more than the maximum)
    if max_experiments is not None and len(experiments) > max_experiments:
        if seed is not None:
            random.Random(seed).shuffle(experiments)
        else:
            random.shuffle(experiments)

        experiments = experiments[:max_experiments]
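    # Note: with a fixed seed, the seeded shuffle selects the same random
    # subset on every run, so repeated invocations yield an identical list.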

    def format_f(f):
        # Probably isn't always valid, but works for our numbers
        return "{:f}".format(f).rstrip("0").rstrip(".")

    # Output list of hyperparameters for this method
    runlist = []

    for the_tuple in experiments:
        lr, w, p, n, t = the_tuple

        # Folder for the logs/models
        folder = "lr{lr}_w{w}_p{p}_n{n}_t{t}".format(
            lr=format_f(lr), w=format_f(w), p=p, n=n, t=format_f(t),
        )

        # Args that set the hyperparameters
        options = "--lr={lr} " \
            "--similarity_weight={w} " \
            "--max_positives={p} " \
            "--max_negatives={n} " \
            "--temperature={t}".format(
                lr=format_f(lr), w=format_f(w), p=p, n=n, t=format_f(t),
            )

        runlist.append((folder, options, the_tuple))

    return runlist
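
# Example: with baseline=True, the lr=0.0001 entry of the returned runlist is
#   ("lr0.0001_w0_p1_n1_t0",
#    "--lr=0.0001 --similarity_weight=0 --max_positives=1 --max_negatives=1 --temperature=0",
#    [0.0001, 0, 1, 1, 0])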
if __name__ == "__main__":
seed = 42
max_experiments = 30
# Match that of hyperparameter_tuning_analysis.py probably
datasets = [
"ucihar", "ucihhar", "wisdm_ar", "wisdm_at",
"myo", "ninapro_db5_like_myo_noshift",
"normal_n12_l3_inter2_intra1_5,0,0,0_sine",
"normal_n12_l3_inter2_intra1_0,0.5,0,0_sine",
"normal_n12_l3_inter1_intra2_0,0,5,0_sine",
"normal_n12_l3_inter1_intra2_0,0,0,0.5_sine",
]
methods = ["none", "codats", "calda_xs_h", "upper"]
our_method_list = generate_hyperparameter_list(
max_experiments=max_experiments,
seed=seed,
baseline=False,
)
baseline_list = generate_hyperparameter_list(
max_experiments=max_experiments,
seed=seed,
baseline=True,
)
# [dataset][method] = ""
hyperparameter_tuning_experiments_list = collections.defaultdict(
lambda: collections.defaultdict(str)
)
for dataset in datasets:
for method in methods:
if method == "calda_xs_h":
hyperparameter_tuning_experiments_list[dataset][method] = our_method_list
elif method == "none" or method == "upper" or method == "codats":
hyperparameter_tuning_experiments_list[dataset][method] = baseline_list
else:
raise NotImplementedError(method + " experiments not implemented")
print("# Generated by hyperparameter_tuning_experiments.py. Changes will be overwritten.")
print_dictionary(hyperparameter_tuning_experiments_list,
"hyperparameter_tuning_experiments_list")