From 661731007fbe29c76963c5c981f495b38d4694bd Mon Sep 17 00:00:00 2001
From: Jovan Mitrevski
Date: Wed, 11 Dec 2024 00:26:16 -0600
Subject: [PATCH 1/9] don't overwrite already set accum_t, fix pointwise output res

---
 hls4ml/backends/fpga/fpga_layers.py | 10 ++++++----
 hls4ml/model/layers.py              | 10 ++++++----
 2 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/hls4ml/backends/fpga/fpga_layers.py b/hls4ml/backends/fpga/fpga_layers.py
index 356973517c..0026ebe213 100644
--- a/hls4ml/backends/fpga/fpga_layers.py
+++ b/hls4ml/backends/fpga/fpga_layers.py
@@ -73,12 +73,14 @@ def set_thresholds(self, scale, bias, ternary_threshold=0.5):
 class PointwiseConv1D(Conv1D):
     '''Optimized Conv1D implementation for 1x1 kernels.'''
 
-    # Nothing to do, will pick up function and config from class name
-    pass
+    def initialize(self):
+        # Do nothing, values copied
+        pass
 
 
 class PointwiseConv2D(Conv2D):
     '''Optimized Conv2D implementation for 1x1 kernels.'''
 
-    # Nothing to do, will pick up function and config from class name
-    pass
+    def initialize(self):
+        # Do nothing, values copied
+        pass
diff --git a/hls4ml/model/layers.py b/hls4ml/model/layers.py
index edd0051c6e..3847cda9cf 100644
--- a/hls4ml/model/layers.py
+++ b/hls4ml/model/layers.py
@@ -176,10 +176,12 @@ def _wrap_precision_to_type(self, name, precision):
         return NamedType(name=name, precision=precision)
 
     def _set_accum_t(self):
-        has_accum_t = any(a for a in self.expected_attributes if a.name == 'accum_t' and isinstance(a, TypeAttribute))
-        if has_accum_t:
-            accum_t = NamedType(*reversed(self.model.config.get_precision(self, 'accum')))
-            self.set_attr('accum_t', accum_t)
+        """Set the accumulator, but don't overwrite an existing one"""
+        if self.get_attr('accum_t') is None:
+            has_accum_t = any(a for a in self.expected_attributes if a.name == 'accum_t' and isinstance(a, TypeAttribute))
+            if has_accum_t:
+                accum_t = NamedType(*reversed(self.model.config.get_precision(self, 'accum')))
+                self.set_attr('accum_t', accum_t)
 
     def _set_type_t(self, name):
         has_type_t = any(a for a in self.expected_attributes if a.name == name + '_t' and isinstance(a, TypeAttribute))

From f211a0e32e7bbc4a608c19e353b8648f9d5c03b7 Mon Sep 17 00:00:00 2001
From: Jan-Frederik Schulte
Date: Fri, 13 Dec 2024 15:07:03 -0500
Subject: [PATCH 2/9] split hgq tests and isolate qkeras tests to make tests run in under 1h

---
 test/pytest/generate_ci_yaml.py |   2 +-
 test/pytest/test_hgq_layers.py  |  85 ----------------
 test/pytest/test_hgq_players.py | 169 ++++++++++++++++++++++++++++++++
 3 files changed, 170 insertions(+), 86 deletions(-)
 create mode 100644 test/pytest/test_hgq_players.py

diff --git a/test/pytest/generate_ci_yaml.py b/test/pytest/generate_ci_yaml.py
index b130b43cef..c83e7ad5c7 100644
--- a/test/pytest/generate_ci_yaml.py
+++ b/test/pytest/generate_ci_yaml.py
@@ -24,7 +24,7 @@
 BLACKLIST = {'test_reduction'}
 
 # Long-running tests will not be bundled with other tests
-LONGLIST = {'test_hgq_layers'}
+LONGLIST = {'test_hgq_layers','test_hgq_players','test_qkeras'}
 
 
 def path_to_name(test_path):
diff --git a/test/pytest/test_hgq_layers.py b/test/pytest/test_hgq_layers.py
index 92a7ea1876..4605394409 100644
--- a/test/pytest/test_hgq_layers.py
+++ b/test/pytest/test_hgq_layers.py
@@ -79,51 +79,6 @@ def run_model_test(
     _run_synth_match_test(proxy, data, io_type, backend, dir, cond=cond)
 
 
-def create_player_model(layer: str, rnd_strategy: str, io_type: str):
-    pa_config = get_default_paq_conf()
-    pa_config['rnd_strategy'] = rnd_strategy
-    pa_config['skip_dims'] = 'all' if io_type == 'io_stream' else 'batch'
-    set_default_paq_conf(pa_config)
-
-    inp = keras.Input(shape=(15))
-    if 'PConcatenate' in layer:
-        _inp = [HQuantize()(inp)] * 2
-        out = eval(layer)(_inp)
-        out = HDense(15)(out)
-        return keras.Model(inp, out)
-    elif 'Signature' in layer:
-        _inp = eval(layer)(inp)
-        out = HDense(15)(_inp)
-        return keras.Model(inp, out)
-    elif 'Pool2D' in layer:
-        _inp = PReshape((3, 5, 1))(HQuantize()(inp))
-    elif 'Pool1D' in layer:
-        _inp = PReshape((5, 3))(HQuantize()(inp))
-    elif 'Dense' in layer or 'Activation' in layer:
-        _inp = HQuantize()(inp)
-    elif 'Flatten' in layer:
-        out = HQuantize()(inp)
-        out = PReshape((3, 5))(out)
-        out = HConv1D(2, 2)(out)
-        out = eval(layer)(out)
-        out = HDense(15)(out)
-        return keras.Model(inp, out)
-    else:
-        raise Exception(f'Please add test for {layer}')
-
-    out = eval(layer)(_inp)
-    model = keras.Model(inp, out)
-
-    for layer in model.layers:
-        # No weight bitwidths to randomize
-        # And activation bitwidths
-        if hasattr(layer, 'paq'):
-            fbw: tf.Variable = layer.paq.fbw
-            fbw.assign(tf.constant(np.random.uniform(4, 6, fbw.shape).astype(np.float32)))
-
-    return model
-
-
 def create_hlayer_model(layer: str, rnd_strategy: str, io_type: str):
     pa_config = get_default_paq_conf()
     pa_config['rnd_strategy'] = rnd_strategy
@@ -222,43 +177,3 @@ def test_syn_hlayers(layer, N: int, rnd_strategy: str, io_type: str, cover_facto
     path = test_path / f'hls4mlprj_hgq_{layer}_{rnd_strategy}_{io_type}_{aggressive}_{backend}'
 
     run_model_test(model, cover_factor, data, io_type, backend, str(path), aggressive, cond=cond)
-
-
-@pytest.mark.parametrize(
-    'layer',
-    [
-        "PConcatenate()",
-        "PMaxPool1D(2, padding='same')",
-        "PMaxPool1D(4, padding='same')",
-        "PMaxPool2D((5,3), padding='same')",
-        "PMaxPool1D(2, padding='valid')",
-        "PMaxPool2D((2,3), padding='valid')",
-        "Signature(1,6,3)",
-        "PAvgPool1D(2, padding='same')",
-        "PAvgPool2D((1,2), padding='same')",
-        "PAvgPool2D((2,2), padding='same')",
-        "PAvgPool1D(2, padding='valid')",
-        "PAvgPool2D((1,2), padding='valid')",
-        "PAvgPool2D((2,2), padding='valid')",
-        "PFlatten()",
-    ],
-)
-@pytest.mark.parametrize("N", [1000])
-@pytest.mark.parametrize("rnd_strategy", ['floor', 'standard_round'])
-@pytest.mark.parametrize("io_type", ['io_parallel', 'io_stream'])
-@pytest.mark.parametrize("cover_factor", [1.0])
-@pytest.mark.parametrize("aggressive", [True, False])
-@pytest.mark.parametrize("backend", ['vivado', 'vitis'])
-def test_syn_players(layer, N: int, rnd_strategy: str, io_type: str, cover_factor: float, aggressive: bool, backend: str):
-    model = create_player_model(layer=layer, rnd_strategy=rnd_strategy, io_type=io_type)
-    data = get_data((N, 15), 7, 1)
-
-    path = test_path / f'hls4mlprj_hgq_{layer}_{rnd_strategy}_{io_type}_{aggressive}_{backend}'
-
-    if 'Signature' in layer:
-        q = gfixed(1, 6, 3)
-        data = q(data).numpy()
-    if "padding='same'" in layer and io_type == 'io_stream':
-        pytest.skip("io_stream does not support padding='same' for pools at the moment")
-
-    run_model_test(model, cover_factor, data, io_type, backend, str(path), aggressive)
diff --git a/test/pytest/test_hgq_players.py b/test/pytest/test_hgq_players.py
new file mode 100644
index 0000000000..db44328a2d
--- /dev/null
+++ b/test/pytest/test_hgq_players.py
@@ -0,0 +1,169 @@
+from pathlib import Path
+
+import HGQ  # noqa: F401
+import numpy as np
+import pytest
+import tensorflow as tf
+from HGQ import get_default_paq_conf, set_default_paq_conf, trace_minmax
+from HGQ.layers import (  # noqa: F401
+    HConv1D,
+    HDense,
+    HQuantize,
+    PAvgPool1D,
+    PAvgPool2D,
+    PConcatenate,
+    PFlatten,
+    PMaxPool1D,
+    PMaxPool2D,
+    PReshape,
+    Signature,
+)
+from HGQ.proxy import to_proxy_model
+from HGQ.proxy.fixed_point_quantizer import gfixed
+from tensorflow import keras
+
+from hls4ml.converters import convert_from_keras_model
+
+# tf.config.experimental_run_functions_eagerly(True)  # noqa
+
+
+test_path = Path(__file__).parent
+
+
+def _run_synth_match_test(proxy: keras.Model, data, io_type: str, backend: str, dir: str, cond=None):
+
+    output_dir = dir + '/hls4ml_prj'
+    hls_model = convert_from_keras_model(
+        proxy,
+        io_type=io_type,
+        output_dir=output_dir,
+        backend=backend,
+        hls_config={'Model': {'Precision': 'fixed<1,0>', 'ReuseFactor': 1}},
+    )
+    hls_model.compile()
+
+    data_len = data.shape[0] if isinstance(data, np.ndarray) else data[0].shape[0]
+    # Multiple output case. Check each output separately
+    if len(proxy.outputs) > 1:  # type: ignore
+        r_proxy: list[np.ndarray] = [x.numpy() for x in proxy(data)]  # type: ignore
+        r_hls: list[np.ndarray] = hls_model.predict(data)  # type: ignore
+        r_hls = [x.reshape(r_proxy[i].shape) for i, x in enumerate(r_hls)]
+    else:
+        r_proxy: list[np.ndarray] = [proxy(data).numpy()]  # type: ignore
+        r_hls: list[np.ndarray] = [hls_model.predict(data).reshape(r_proxy[0].shape)]  # type: ignore
+
+    errors = []
+    for i, (p, h) in enumerate(zip(r_proxy, r_hls)):
+        try:
+            if cond is None:
+                mismatch_ph = p != h
+                assert (
+                    np.sum(mismatch_ph) == 0
+                ), f"Proxy-HLS4ML mismatch for out {i}: {np.sum(np.any(mismatch_ph, axis=1))} out of {data_len} samples are different. Sample: {p[mismatch_ph].ravel()[:5]} vs {h[mismatch_ph].ravel()[:5]}"  # noqa: E501
+            else:
+                cond(p, h)
+        except AssertionError as e:
+            errors.append(e)
+    if len(errors) > 0:
+        msgs = [str(e) for e in errors]
+        raise AssertionError('\n'.join(msgs))
+
+
+def run_model_test(
+    model: keras.Model, cover_factor: float | None, data, io_type: str, backend: str, dir: str, aggressive: bool, cond=None
+):
+    data_len = data.shape[0] if isinstance(data, np.ndarray) else data[0].shape[0]
+    if cover_factor is not None:
+        trace_minmax(model, data, cover_factor=cover_factor, bsz=data_len)
+    proxy = to_proxy_model(model, aggressive=aggressive, unary_lut_max_table_size=4096)
+    _run_synth_match_test(proxy, data, io_type, backend, dir, cond=cond)
+
+
+def create_player_model(layer: str, rnd_strategy: str, io_type: str):
+    pa_config = get_default_paq_conf()
+    pa_config['rnd_strategy'] = rnd_strategy
+    pa_config['skip_dims'] = 'all' if io_type == 'io_stream' else 'batch'
+    set_default_paq_conf(pa_config)
+
+    inp = keras.Input(shape=(15))
+    if 'PConcatenate' in layer:
+        _inp = [HQuantize()(inp)] * 2
+        out = eval(layer)(_inp)
+        out = HDense(15)(out)
+        return keras.Model(inp, out)
+    elif 'Signature' in layer:
+        _inp = eval(layer)(inp)
+        out = HDense(15)(_inp)
+        return keras.Model(inp, out)
+    elif 'Pool2D' in layer:
+        _inp = PReshape((3, 5, 1))(HQuantize()(inp))
+    elif 'Pool1D' in layer:
+        _inp = PReshape((5, 3))(HQuantize()(inp))
+    elif 'Dense' in layer or 'Activation' in layer:
+        _inp = HQuantize()(inp)
+    elif 'Flatten' in layer:
+        out = HQuantize()(inp)
+        out = PReshape((3, 5))(out)
+        out = HConv1D(2, 2)(out)
+        out = eval(layer)(out)
+        out = HDense(15)(out)
+        return keras.Model(inp, out)
+    else:
+        raise Exception(f'Please add test for {layer}')
+
+    out = eval(layer)(_inp)
+    model = keras.Model(inp, out)
+
+    for layer in model.layers:
+        # No weight bitwidths to randomize
+        # And activation bitwidths
+        if hasattr(layer, 'paq'):
+            fbw: tf.Variable = layer.paq.fbw
+            fbw.assign(tf.constant(np.random.uniform(4, 6, fbw.shape).astype(np.float32)))
+
+    return model
+
+def get_data(shape: tuple[int, ...], v: float, max_scale: float):
+    rng = np.random.default_rng()
+    a1 = rng.uniform(-v, v, shape).astype(np.float32)
+    a2 = rng.uniform(0, max_scale, (1, shape[1])).astype(np.float32)
+    return (a1 * a2).astype(np.float32)
+
+@pytest.mark.parametrize(
+    'layer',
+    [
+        "PConcatenate()",
+        "PMaxPool1D(2, padding='same')",
+        "PMaxPool1D(4, padding='same')",
+        "PMaxPool2D((5,3), padding='same')",
+        "PMaxPool1D(2, padding='valid')",
+        "PMaxPool2D((2,3), padding='valid')",
+        "Signature(1,6,3)",
+        "PAvgPool1D(2, padding='same')",
+        "PAvgPool2D((1,2), padding='same')",
+        "PAvgPool2D((2,2), padding='same')",
+        "PAvgPool1D(2, padding='valid')",
+        "PAvgPool2D((1,2), padding='valid')",
+        "PAvgPool2D((2,2), padding='valid')",
+        "PFlatten()",
+    ],
+)
+@pytest.mark.parametrize("N", [1000])
+@pytest.mark.parametrize("rnd_strategy", ['floor', 'standard_round'])
+@pytest.mark.parametrize("io_type", ['io_parallel', 'io_stream'])
+@pytest.mark.parametrize("cover_factor", [1.0])
+@pytest.mark.parametrize("aggressive", [True, False])
+@pytest.mark.parametrize("backend", ['vivado', 'vitis'])
+def test_syn_players(layer, N: int, rnd_strategy: str, io_type: str, cover_factor: float, aggressive: bool, backend: str):
+    model = create_player_model(layer=layer, rnd_strategy=rnd_strategy, io_type=io_type)
+    data = get_data((N, 15), 7, 1)
+
+    path = test_path / f'hls4mlprj_hgq_{layer}_{rnd_strategy}_{io_type}_{aggressive}_{backend}'
+
+    if 'Signature' in layer:
+        q = gfixed(1, 6, 3)
+        data = q(data).numpy()
+    if "padding='same'" in layer and io_type == 'io_stream':
+        pytest.skip("io_stream does not support padding='same' for pools at the moment")
+
+    run_model_test(model, cover_factor, data, io_type, backend, str(path), aggressive)

From 82ab6bfc9e4a71f4316b7cad437270495711dad4 Mon Sep 17 00:00:00 2001
From: Jan-Frederik Schulte
Date: Fri, 13 Dec 2024 15:09:23 -0500
Subject: [PATCH 3/9] pre-commit

---
 test/pytest/generate_ci_yaml.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/pytest/generate_ci_yaml.py b/test/pytest/generate_ci_yaml.py
index c83e7ad5c7..0714a4acce 100644
--- a/test/pytest/generate_ci_yaml.py
+++ b/test/pytest/generate_ci_yaml.py
@@ -18,13 +18,14 @@
     EXAMPLEMODEL: {}
 """
 
+
 n_test_files_per_yml = int(os.environ.get('N_TESTS_PER_YAML', 4))
 
 # Blacklisted tests will be skipped
 BLACKLIST = {'test_reduction'}
 
 # Long-running tests will not be bundled with other tests
-LONGLIST = {'test_hgq_layers','test_hgq_players','test_qkeras'}
+LONGLIST = {'test_hgq_layers', 'test_hgq_players', 'test_qkeras'}
 
 
 def path_to_name(test_path):

From 96da3fe5d348e06b53cd4e1ce1130cbb4db41580 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Fri, 13 Dec 2024 20:13:23 +0000
Subject: [PATCH 4/9] [pre-commit.ci] auto fixes from pre-commit hooks

---
 test/pytest/test_hgq_players.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/test/pytest/test_hgq_players.py b/test/pytest/test_hgq_players.py
index db44328a2d..9c4b40f97f 100644
--- a/test/pytest/test_hgq_players.py
+++ b/test/pytest/test_hgq_players.py
@@ -123,12 +123,14 @@ def create_player_model(layer: str, rnd_strategy: str, io_type: str):
 
     return model
 
+
 def get_data(shape: tuple[int, ...], v: float, max_scale: float):
     rng = np.random.default_rng()
     a1 = rng.uniform(-v, v, shape).astype(np.float32)
     a2 = rng.uniform(0, max_scale, (1, shape[1])).astype(np.float32)
     return (a1 * a2).astype(np.float32)
 
+
 @pytest.mark.parametrize(

From 8a018f16773c368ca83b4ee5774e2da33e09d28c Mon Sep 17 00:00:00 2001
From: Jan-Frederik Schulte
Date: Fri, 13 Dec 2024 15:15:21 -0500
Subject: [PATCH 5/9] remove unnecessary import

---
 test/pytest/test_hgq_layers.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/test/pytest/test_hgq_layers.py b/test/pytest/test_hgq_layers.py
index 4605394409..80d96fbcda 100644
--- a/test/pytest/test_hgq_layers.py
+++ b/test/pytest/test_hgq_layers.py
@@ -19,7 +19,6 @@
     Signature,
 )
 from HGQ.proxy import to_proxy_model
-from HGQ.proxy.fixed_point_quantizer import gfixed
 from tensorflow import keras
 
 from hls4ml.converters import convert_from_keras_model

From 46bdacc05c359531c5070f647fea23093dbc90f0 Mon Sep 17 00:00:00 2001
From: Jovan Mitrevski
Date: Fri, 13 Dec 2024 14:38:29 -0600
Subject: [PATCH 6/9] update example-model

---
 example-models | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/example-models b/example-models
index 6a82da23ad..c6bb3c0686 160000
--- a/example-models
+++ b/example-models
@@ -1 +1 @@
-Subproject commit 6a82da23ad24c238fe156ed4d0aa907db547dbcf
+Subproject commit c6bb3c0686d52439d8c53d7407903bf78e852562

From 1d0cf1e28d5ecdccb1b1dc1786ee8e467cfd019e Mon Sep 17 00:00:00 2001
From: Jovan Mitrevski
Date: Fri, 13 Dec 2024 14:39:21 -0600
Subject: [PATCH 7/9] change order of optimizers

---
 hls4ml/model/optimizer/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hls4ml/model/optimizer/__init__.py b/hls4ml/model/optimizer/__init__.py
index 3302e3c691..a745eceba1 100644
--- a/hls4ml/model/optimizer/__init__.py
+++ b/hls4ml/model/optimizer/__init__.py
@@ -59,7 +59,6 @@
     'convert',
     [
         'channels_last_converter',
-        'merge_linear_activation',
         'seperable_to_depthwise_and_conv',
         'remove_transpose_before_flatten',
         'remove_nop_transpose',
@@ -74,6 +73,7 @@
         'replace_multidimensional_dense_with_conv',
         'enforce_proxy_model_embedded_config',
         'eliminate_linear_activation',
+        'merge_linear_activation',  # many of the above optimizers need to be done before this
         'infer_precision_types',
     ],

From eabb785dc8a748987429cfaefd11c82eef8d285a Mon Sep 17 00:00:00 2001
From: Jan-Frederik Schulte
Date: Fri, 13 Dec 2024 17:31:06 -0500
Subject: [PATCH 8/9] fix example-models setting for long running pytests

---
 test/pytest/generate_ci_yaml.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/pytest/generate_ci_yaml.py b/test/pytest/generate_ci_yaml.py
index 0714a4acce..4ff9b85723 100644
--- a/test/pytest/generate_ci_yaml.py
+++ b/test/pytest/generate_ci_yaml.py
@@ -72,7 +72,7 @@ def generate_test_yaml(test_root='.'):
             name = path.stem.replace('test_', '')
            test_file = str(path.relative_to(test_root))
             needs_examples = uses_example_model(path)
-            diff_yml = yaml.safe_load(template.format(name, test_file, needs_examples))
+            diff_yml = yaml.safe_load(template.format(name, test_file, int(needs_examples)))
             yml.update(diff_yml)
 
     return yml

From fb120403ff800689059b6bf2a6adc320d185c68b Mon Sep 17 00:00:00 2001
From: Jan-Frederik Schulte
Date: Fri, 13 Dec 2024 19:27:15 -0500
Subject: [PATCH 9/9] add pytorch to long tests

---
 test/pytest/generate_ci_yaml.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/pytest/generate_ci_yaml.py b/test/pytest/generate_ci_yaml.py
index 4ff9b85723..adc3d680ab 100644
--- a/test/pytest/generate_ci_yaml.py
+++ b/test/pytest/generate_ci_yaml.py
@@ -25,7 +25,7 @@
 BLACKLIST = {'test_reduction'}
 
 # Long-running tests will not be bundled with other tests
-LONGLIST = {'test_hgq_layers', 'test_hgq_players', 'test_qkeras'}
+LONGLIST = {'test_hgq_layers', 'test_hgq_players', 'test_qkeras', 'test_pytorch_api'}