hls4ml_synth.py
"""
Load model and import to hls4ml for synthesizing
Precision vs Acciuracy:
ap_fixed<16,6> -
ap_fixed<8,3> -
hls4ml lacking support for pytorch
"""
import argparse
import hls4ml
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self, resized=False):
        super(Net, self).__init__()
        layer_1 = 256
        layer_2 = 128
        if resized:
            layer_1 = 128
            layer_2 = 32
        self.fc1 = nn.Linear(2, layer_1)
        self.act1 = nn.ReLU()
        self.fc2 = nn.Linear(layer_1, layer_2)
        self.act2 = nn.ReLU()
        self.fc3 = nn.Linear(layer_2, 3)
        self.act3 = nn.ReLU()

    def forward(self, x):
        x = self.act1(self.fc1(x))
        x = self.act2(self.fc2(x))
        x = self.act3(self.fc3(x))
        return x
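
# Quick sanity check for the architecture (kept as a comment so it does not run
# on import): the MLP maps 2 input features to 3 output classes.
#
#   net = Net()                 # Net(resized=True) for the smaller variant
#   dummy = torch.randn(4, 2)   # batch of 4 samples, 2 features each
#   out = net(dummy)            # out.shape == torch.Size([4, 3])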


def eval_hls_model(model, test_loader):
    # accumulate integer labels and argmax predictions over the test set
    y_true = np.array([], dtype=np.int32)
    y_pred = np.array([], dtype=np.int32)
    for data, target in test_loader:
        data = np.ascontiguousarray(data.numpy())
        target = np.ascontiguousarray(target.numpy())
        output = model.predict(data)
        ind = np.argmax(output, 1).astype(np.int32)
        y_pred = np.concatenate((y_pred, ind))
        y_true = np.concatenate((y_true, target))
    # accuracy = fraction of samples where the prediction matches the label
    acc = y_true - y_pred
    accuracy = (len(y_true) - np.count_nonzero(acc)) / len(y_true)
    return accuracy
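
# eval_hls_model expects an hls4ml model (predict() works on numpy arrays) and a
# PyTorch DataLoader yielding (data, target) tensor batches with integer class
# labels. Accuracy is the fraction of samples whose argmax prediction equals the
# label, e.g. y_true = [0, 1, 2] vs y_pred = [0, 1, 1] gives 2/3.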


def expand_config(config, precision='ap_fixed<12,4>', reusefactor=1):
    config['Model'] = {}
    config['Model']['fc1'] = {}
    config['Model']['fc2'] = {}
    config['Model']['fc3'] = {}
    config['Model']['fc1']['Precision'] = {}
    config['Model']['fc2']['Precision'] = {}
    config['Model']['fc3']['Precision'] = {}
    # weight
    config['Model']['fc1']['Precision']['weight'] = precision
    config['Model']['fc2']['Precision']['weight'] = precision
    config['Model']['fc3']['Precision']['weight'] = precision
    # bias
    config['Model']['fc1']['Precision']['bias'] = precision
    config['Model']['fc2']['Precision']['bias'] = precision
    config['Model']['fc3']['Precision']['bias'] = precision
    # result
    config['Model']['fc1']['Precision']['result'] = precision
    config['Model']['fc2']['Precision']['result'] = precision
    config['Model']['fc3']['Precision']['result'] = precision
    # reuse factor
    config['Model']['fc1']['ReuseFactor'] = reusefactor
    config['Model']['fc2']['ReuseFactor'] = reusefactor
    config['Model']['fc3']['ReuseFactor'] = reusefactor
    # activations
    config['Model']['act1'] = {}
    config['Model']['act2'] = {}
    config['Model']['act3'] = {}
    config['Model']['act1']['Precision'] = precision
    config['Model']['act2']['Precision'] = precision
    config['Model']['act3']['Precision'] = precision
    config['Model']['act1']['ReuseFactor'] = reusefactor
    config['Model']['act2']['ReuseFactor'] = reusefactor
    config['Model']['act3']['ReuseFactor'] = reusefactor
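
# Note that expand_config() rebuilds config['Model'] from scratch, so any
# model-level keys (Precision, ReuseFactor, Strategy) set before the call are
# replaced by per-layer entries. Sketch of the result for one layer with the
# defaults above:
#
#   config['Model']['fc1'] == {
#       'Precision': {'weight': 'ap_fixed<12,4>',
#                     'bias':   'ap_fixed<12,4>',
#                     'result': 'ap_fixed<12,4>'},
#       'ReuseFactor': 1,
#   }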


def main(args):
    filename = 'model_orig.pt'
    to_file = 'model_orig.png'
    output_dir = 'hls4ml_prj_orig'
    if args.resized:
        filename = 'model_resized.pt'
        output_dir = 'hls4ml_prj_resized'
        to_file = 'model_resized.png'
    model = Net(args.resized)
    model.load_state_dict(torch.load(filename))

    # configure the hls4ml conversion
    config = hls4ml.utils.config_from_pytorch_model(model, granularity='model')
    config['Model']['ReuseFactor'] = 128
    config['Model']['Strategy'] = 'latency'
    config['Model']['Precision'] = 'ap_fixed<12,6>'
    expand_config(config)
    if args.resized:
        config['Model']['ReuseFactor'] = 1
        config['Model']['Strategy'] = 'latency'
    print('------------------------------------------------------')
    print(config)
    print('------------------------------------------------------')

    # convert to an hls4ml model
    hls_model = hls4ml.converters.convert_from_pytorch_model(model,
                                                             input_shape=[1, 2],
                                                             hls_config=config,
                                                             output_dir=output_dir,
                                                             part='xcu250-figd2104-2L-e')

    # visualize the model
    hls4ml.utils.plot_model(hls_model, show_shapes=True, show_precision=True, to_file=to_file)

    # compile the HLS model and evaluate its accuracy on the test set
    hls_model.compile()
    from MLP_Qubit_Readout_reduced import load_dataset
    train_loader, test_loader = load_dataset()
    hls_model_acc = eval_hls_model(hls_model, test_loader)
    print('------------------------------------------------------')
    print(f'hls4ml accuracy: {hls_model_acc}')
    print('------------------------------------------------------')

    # build and read the Vivado report
    if args.build:
        hls_model.build(csim=False)
        hls4ml.report.read_vivado_report(output_dir)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Options for hls4ml synthesis')
    parser.add_argument('-r', '--resized', action='store_true',
                        help='use the smaller resized model')
    parser.add_argument('-b', '--build', action='store_true',
                        help='run the Vivado build and read its report')
    args = parser.parse_args()
    main(args)
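
# Example invocations (assuming model_orig.pt / model_resized.pt and the
# MLP_Qubit_Readout_reduced module are available in the working directory):
#
#   python hls4ml_synth.py                 # convert and evaluate the original model
#   python hls4ml_synth.py --resized       # convert and evaluate the resized model
#   python hls4ml_synth.py --resized -b    # additionally run the Vivado build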