diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml
index 1e67a5c6..59d5c60c 100644
--- a/.github/workflows/deploy.yml
+++ b/.github/workflows/deploy.yml
@@ -25,7 +25,7 @@ jobs:
channels: conda-forge
activate-environment: hls4ml-tutorial
environment-file: environment.yml
- python-version: 3.10.10
+ python-version: 3.10.16
auto-activate-base: false
# Check dependencies
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
deleted file mode 100644
index 6b99bb2a..00000000
--- a/.github/workflows/docker-publish.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-name: docker-publish
-
-on:
- pull_request:
- branches:
- - 'main'
- push:
- branches:
- - 'main'
-
-jobs:
- docker:
- runs-on: ubuntu-latest
- permissions:
- contents: read
- packages: write
- steps:
- -
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
- -
- name: Login to GitHub Container Registry
- uses: docker/login-action@v3
- with:
- registry: ghcr.io
- username: ${{ github.repository_owner }}
- password: ${{ secrets.GITHUB_TOKEN }}
- -
- name: Build and push
- uses: docker/build-push-action@v6
- with:
- push: ${{ github.event_name != 'pull_request' }}
- file: docker/Dockerfile
- cache-from: type=gha
- cache-to: type=gha,mode=max
- tags: |
- ghcr.io/${{ github.repository }}/hls4ml-0.8.0:latest
- ghcr.io/${{ github.repository }}/hls4ml-0.8.0:${{ github.sha }}
diff --git a/README.md b/README.md
index c3696423..45fd2bff 100644
--- a/README.md
+++ b/README.md
@@ -13,50 +13,18 @@ There are several ways to run the tutorial notebooks:
[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/fastmachinelearning/hls4ml-tutorial/HEAD)
## Conda
+Running the tutorials requires AMD Vitis HLS to be installed; see [here](https://www.xilinx.com/support/download/index.html/content/xilinx/en/downloadNav/vitis.html).
+After the installation, the necessary environment variables can be set using:
+```
+source /path/to/your/installation/Xilinx/Vitis_HLS/202X.X/settings64.(c)sh
+```
+
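+For example, with a hypothetical Vitis HLS 2024.1 installation under `/opt/Xilinx`, this would be `source /opt/Xilinx/Vitis_HLS/2024.1/settings64.sh`. The tutorial notebooks locate the tools through the `XILINX_VITIS` environment variable, so a quick sanity check from Python (a minimal sketch, assuming your setup script exports that variable) is:
+```python
+import os
+import shutil
+
+print(os.environ.get('XILINX_VITIS'))  # should point at your Vitis installation
+print(shutil.which('vitis_hls'))       # should resolve once settings64.(c)sh has been sourced
+```
+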
The Python environment used for the tutorials is specified in the `environment.yml` file.
It can be setup like:
```bash
conda env create -f environment.yml
conda activate hls4ml-tutorial
-```
-
-## Docker without Vivado
-Pull the prebuilt image from the GitHub Container Registry:
-```bash
-docker pull ghcr.io/fastmachinelearning/hls4ml-tutorial/hls4ml-0.8.0:latest
-```
-
-Follow these steps to build a Docker image that can be used locally, or on a JupyterHub instance.
-You can build the image (without Vivado):
-```bash
-docker build https://github.com/fastmachinelearning/hls4ml-tutorial -f docker/Dockerfile
-```
-Alternatively, you can clone the repository and build locally:
-```bash
-git clone https://github.com/fastmachinelearning/hls4ml-tutorial
-cd hls4ml-tutorial
-docker build -f docker/Dockerfile -t ghcr.io/fastmachinelearning/hls4ml-tutorial/hls4ml-0.8.0:latest .
-```
-Then to start the container:
-```bash
-docker run -p 8888:8888 ghcr.io/fastmachinelearning/hls4ml-tutorial/hls4ml-0.8.0:latest
-```
-When the container starts, the Jupyter notebook server is started, and the link to open it in your browser is printed.
-You can clone the repository inside the container and run the notebooks.
-
-## Docker with Vivado
-Pull the prebuilt image from the GitHub Container Registry:
-```bash
-docker pull ghcr.io/fastmachinelearning/hls4ml-tutorial/hls4ml-0.8.0-vivado-2019.2:latest
-```
-
-To build the image with Vivado, run (Warning: takes a long time and requires a lot of disk space):
-```bash
-docker build -f docker/Dockerfile.vivado -t ghcr.io/fastmachinelearning/hls4ml-tutorial/hls4ml-0.8.0-vivado-2019.2:latest .
-```
-Then to start the container:
-```bash
-docker run -p 8888:8888 ghcr.io/fastmachinelearning/hls4ml-tutorial/hls4ml-0.8.0-vivado-2019.2:latest
+source /path/to/your/installation/Xilinx/Vitis_HLS/202X.X/settings64.(c)sh
```
## Companion material
diff --git a/docker/Dockerfile b/docker/Dockerfile
deleted file mode 100644
index a4db3be3..00000000
--- a/docker/Dockerfile
+++ /dev/null
@@ -1,40 +0,0 @@
-FROM jupyter/tensorflow-notebook:tensorflow-2.11.1
-
-# Install prequisites
-USER root
-RUN apt-get update -y && \
- apt-get install --no-install-recommends -y \
- curl \
- libtinfo5 \
- libc6-dev-i386 \
- net-tools \
- graphviz \
- make \
- unzip \
- g++ \
- xvfb \
- git \
- libncursesw5 \
- libc6-dev-i386 && \
- apt-get clean && \
- rm -rf /var/lib/apt/lists/*
-
-# Install hls4ml and dependencies
-USER ${NB_USER}
-RUN mamba install -y -c conda-forge \
- graphviz==7.1.0 \
- pydot==1.4.2 \
- tensorflow-datasets==4.8.3 \
- jupyter-book==0.15.1 \
- jupyter_contrib_nbextensions==0.7.0
-RUN pip install \
- hls4ml[profiling]==0.8.0 \
- qkeras==0.9.0 \
- conifer==0.2b0 \
- pysr==0.16.3
-RUN mamba clean --all -f -y && \
- mamba list && \
- fix-permissions "${CONDA_DIR}" && \
- fix-permissions "/home/${NB_USER}"
-
-LABEL org.opencontainers.image.source https://github.com/fastmachinelearning/hls4ml-tutorial
diff --git a/docker/Dockerfile.vivado b/docker/Dockerfile.vivado
deleted file mode 100644
index 166fa098..00000000
--- a/docker/Dockerfile.vivado
+++ /dev/null
@@ -1,48 +0,0 @@
-FROM jupyter/tensorflow-notebook:tensorflow-2.11.1
-
-# Install prequisites
-USER root
-RUN apt-get update -y && \
- apt-get install --no-install-recommends -y \
- curl \
- libtinfo5 \
- libc6-dev-i386 \
- net-tools \
- graphviz \
- make \
- unzip \
- g++ \
- xvfb \
- git \
- libncursesw5 \
- libc6-dev-i386 && \
- apt-get clean && \
- rm -rf /var/lib/apt/lists/*
-
-# Install Vivado 2019.2
-COPY docker/vivado_cfg.txt /tmp/vivado_cfg.txt
-COPY docker/install_vivado.sh /tmp/install_vivado.sh
-RUN source /tmp/install_vivado.sh && rm /tmp/install_vivado.sh
-
-# Install hls4ml and dependencies
-USER ${NB_USER}
-RUN mamba install -y -c conda-forge \
- graphviz==7.1.0 \
- pydot==1.4.2 \
- tensorflow-datasets==4.8.3 \
- jupyter-book==0.15.1 \
- jupyter_contrib_nbextensions==0.7.0
-RUN pip install \
- hls4ml[profiling]==0.8.0 \
- qkeras==0.9.0 \
- conifer==0.2b0 \
- pysr==0.16.3
-RUN mamba clean --all -f -y && \
- mamba list && \
- fix-permissions "${CONDA_DIR}" && \
- fix-permissions "/home/${NB_USER}"
-
-LABEL org.opencontainers.image.source https://github.com/fastmachinelearning/hls4ml-tutorial
-
-# ENV XILINX_VIVADO /opt/Xilinx/Vivado/2019.2
-COPY docker/start-notebook.sh /usr/local/bin/
diff --git a/docker/install_vivado.sh b/docker/install_vivado.sh
deleted file mode 100644
index 6990e801..00000000
--- a/docker/install_vivado.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-
-function main() {
- # Install Vivado; to speed up build, download files from local webserver
- # See: https://stackoverflow.com/questions/26692708/how-to-add-a-file-to-an-image-in-dockerfile-without-using-the-add-or-copy-direct
- cd /tmp/
- curl https://www.dropbox.com/s/wvp50u7h2jroict/vivado.tar.gz?dl=1 -L -o vivado.tar.gz
- # curl http://169.228.130.58:8000/vivado.tar.gz -o vivado.tar.gz
- tar -xzf vivado.tar.gz
- cd Xilinx_Vivado_2019.2_1106_2127
- ./xsetup --agree XilinxEULA,3rdPartyEULA,WebTalkTerms --batch Install --config /tmp/vivado_cfg.txt
- cd ..
- rm -r Xilinx_Vivado_2019.2_1106_2127
- rm vivado.tar.gz
- rm /tmp/vivado_cfg.txt
-
- # Install the pynq-z2 board files
- curl https://www.dropbox.com/s/meufyrhgcg38i12/pynq-z2.zip?dl=1 -L -o pynq-z2.zip
- # curl http://169.228.130.58:8000/pynq-z2.zip -o pynq-z2.zip
- unzip pynq-z2.zip
- rm pynq-z2.zip
- mv pynq-z2 /opt/Xilinx/Vivado/2019.2/data/boards/board_files/
-
- # Apply Vivado's y2k22 patch
- curl https://www.dropbox.com/s/3gv1jq9074d582o/y2k22_patch.zip?dl=1 -L -o y2k22_patch.zip
- # curl http://169.228.130.58:8000/y2k22_patch.zip -o y2k22_patch.zip
- mv y2k22_patch.zip /opt/Xilinx
- cd /opt/Xilinx
- unzip y2k22_patch.zip
- python y2k22_patch/patch.py
-}
-
-main "$@" || exit 1
diff --git a/docker/start-notebook.sh b/docker/start-notebook.sh
deleted file mode 100755
index d0716b4c..00000000
--- a/docker/start-notebook.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-# Copyright (c) Jupyter Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-set -e
-
-# setup vivado 2019.2
-source /opt/Xilinx/Vivado/2019.2/settings64.sh
-
-# The Jupyter command to launch
-# JupyterLab by default
-DOCKER_STACKS_JUPYTER_CMD="${DOCKER_STACKS_JUPYTER_CMD:=lab}"
-
-if [[ -n "${JUPYTERHUB_API_TOKEN}" ]]; then
- echo "WARNING: using start-singleuser.sh instead of start-notebook.sh to start a server associated with JupyterHub."
- exec /usr/local/bin/start-singleuser.sh "$@"
-fi
-
-wrapper=""
-if [[ "${RESTARTABLE}" == "yes" ]]; then
- wrapper="run-one-constantly"
-fi
-
-# shellcheck disable=SC1091,SC2086
-exec /usr/local/bin/start.sh ${wrapper} jupyter ${DOCKER_STACKS_JUPYTER_CMD} ${NOTEBOOK_ARGS} "$@"
diff --git a/docker/vivado_cfg.txt b/docker/vivado_cfg.txt
deleted file mode 100644
index 5c78689a..00000000
--- a/docker/vivado_cfg.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-#### Vivado HL WebPACK Install Configuration ####
-Edition=Vivado HL WebPACK
-
-# Path where Xilinx software will be installed.
-Destination=/opt/Xilinx
-
-# Choose the Products/Devices the you would like to install.
-Modules=Virtex UltraScale+ HBM:0,Zynq UltraScale+ MPSoC:0,DocNav:1,Kintex UltraScale:0,Zynq-7000:1,System Generator for DSP:0,Virtex UltraScale+:1,Kintex UltraScale+:0,Model Composer:0
-
-# Choose the post install scripts you'd like to run as part of the finalization step. Please note that some of these scripts may require user interaction during runtime.
-InstallOptions=
-
-## Shortcuts and File associations ##
-# Choose whether Start menu/Application menu shortcuts will be created or not.
-CreateProgramGroupShortcuts=1
-
-# Choose the name of the Start menu/Application menu shortcut. This setting will be ignored if you choose NOT to create shortcuts.
-ProgramGroupFolder=Xilinx Design Tools
-
-# Choose whether shortcuts will be created for All users or just the Current user. Shortcuts can be created for all users only if you run the installer as administrator.
-CreateShortcutsForAllUsers=0
-
-# Choose whether shortcuts will be created on the desktop or not.
-CreateDesktopShortcuts=1
-
-# Choose whether file associations will be created or not.
-CreateFileAssociation=1
-
-# Choose whether disk usage will be optimized (reduced) after installation
-EnableDiskUsageOptimization=1
diff --git a/environment.yml b/environment.yml
index 406c5a43..0b7251ee 100644
--- a/environment.yml
+++ b/environment.yml
@@ -2,21 +2,21 @@ name: hls4ml-tutorial
channels:
- conda-forge
dependencies:
- - python=3.10.10
- - jupyter_contrib_nbextensions==0.7.0
- - jupyterhub==3.1.1
- - jupyter-book==0.15.1
+ - python=3.10.16
+ - jupyter_contrib_nbextensions
+ - jupyterhub
+ - jupyter-book
- jsonschema-with-format-nongpl
- pydot==1.4.2
- graphviz==7.1.0
- scikit-learn==1.2.2
- - tensorflow==2.11.1
+ - tensorflow==2.14.0
- tensorflow-datasets==4.8.3
- webcolors
- widgetsnbextension==3.6.0
- pip==23.0.1
- pip:
- - hls4ml[profiling]==0.8.0
- - qkeras==0.9.0
- - conifer==0.2b0
+ - hls4ml[profiling,optimization,sr,HGQ]==1.0.0
+ - conifer==1.5
- pysr==0.16.3
+ - xgboost==1.7.5
diff --git a/images/part5_floorplan.png b/images/part5_floorplan.png
new file mode 100644
index 00000000..768d7374
Binary files /dev/null and b/images/part5_floorplan.png differ
diff --git a/part1_getting_started.ipynb b/part1_getting_started.ipynb
index 6afe89b8..955cae81 100644
--- a/part1_getting_started.ipynb
+++ b/part1_getting_started.ipynb
@@ -27,7 +27,7 @@
"tf.random.set_seed(seed)\n",
"import os\n",
"\n",
- "os.environ['PATH'] = os.environ['XILINX_VIVADO'] + '/bin:' + os.environ['PATH']"
+ "os.environ['PATH'] = os.environ['XILINX_VITIS'] + '/bin:' + os.environ['PATH']"
]
},
{
@@ -188,7 +188,7 @@
" X_train_val,\n",
" y_train_val,\n",
" batch_size=1024,\n",
- " epochs=30,\n",
+ " epochs=10,\n",
" validation_split=0.25,\n",
" shuffle=True,\n",
" callbacks=callbacks.callbacks,\n",
@@ -224,14 +224,13 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Convert the model to FPGA firmware with hls4ml\n",
"Now we will go through the steps to convert the model we trained to a low-latency optimized FPGA firmware with hls4ml.\n",
"First, we will evaluate its classification performance to make sure we haven't lost accuracy using the fixed-point data types. \n",
- "Then we will synthesize the model with Vivado HLS and check the metrics of latency and FPGA resource usage.\n",
+ "Then we will synthesize the model with Vitis HLS and check the metrics of latency and FPGA resource usage.\n",
"\n",
"### Make an hls4ml config & model\n",
"The hls4ml Neural Network inference library is controlled through a configuration dictionary.\n",
@@ -246,13 +245,13 @@
"source": [
"import hls4ml\n",
"\n",
- "config = hls4ml.utils.config_from_keras_model(model, granularity='model')\n",
+ "config = hls4ml.utils.config_from_keras_model(model, granularity='model', backend='Vitis')\n",
"print(\"-----------------------------------\")\n",
"print(\"Configuration\")\n",
"plotting.print_dict(config)\n",
"print(\"-----------------------------------\")\n",
"hls_model = hls4ml.converters.convert_from_keras_model(\n",
- " model, hls_config=config, output_dir='model_1/hls4ml_prj', part='xcu250-figd2104-2L-e'\n",
+ " model, hls_config=config, backend='Vitis', output_dir='model_1/hls4ml_prj', part='xcu250-figd2104-2L-e'\n",
")"
]
},
@@ -327,21 +326,23 @@
"metadata": {},
"source": [
"## Synthesize\n",
- "Now we'll actually use Vivado HLS to synthesize the model. We can run the build using a method of our `hls_model` object.\n",
+ "Now we'll actually use Vitis HLS to synthesize the model. We can run the build using a method of our `hls_model` object.\n",
"After running this step, we can integrate the generated IP into a workflow to compile for a specific FPGA board.\n",
- "In this case, we'll just review the reports that Vivado HLS generates, checking the latency and resource usage.\n",
+ "In this case, we'll just review the reports that Vitis HLS generates, checking the latency and resource usage.\n",
"\n",
"**This can take several minutes.**\n",
"\n",
"While the C-Synthesis is running, we can monitor the progress looking at the log file by opening a terminal from the notebook home, and executing:\n",
"\n",
- "`tail -f model_1/hls4ml_prj/vivado_hls.log`"
+ "`tail -f model_1/hls4ml_prj/vitis_hls.log`"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "scrolled": true
+ },
"outputs": [],
"source": [
"hls_model.build(csim=False)"
@@ -352,7 +353,7 @@
"metadata": {},
"source": [
"## Check the reports\n",
- "Print out the reports generated by Vivado HLS. Pay attention to the Latency and the 'Utilization Estimates' sections"
+    "Print out the reports generated by Vitis HLS. Pay attention to the 'Latency' and 'Utilization Estimates' sections."
]
},
{
@@ -373,11 +374,18 @@
"Calculate how many multiplications are performed for the inference of this network...\n",
"(We'll discuss the outcome)"
]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
}
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -391,7 +399,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.8.6"
+ "version": "3.10.16"
}
},
"nbformat": 4,
diff --git a/part2_advanced_config.ipynb b/part2_advanced_config.ipynb
index 381c98cf..bd2832c1 100644
--- a/part2_advanced_config.ipynb
+++ b/part2_advanced_config.ipynb
@@ -25,7 +25,7 @@
"import plotting\n",
"import os\n",
"\n",
- "os.environ['PATH'] = os.environ['XILINX_VIVADO'] + '/bin:' + os.environ['PATH']"
+ "os.environ['PATH'] = os.environ['XILINX_VITIS'] + '/bin:' + os.environ['PATH']"
]
},
{
@@ -93,7 +93,7 @@
"source": [
"import hls4ml\n",
"\n",
- "config = hls4ml.utils.config_from_keras_model(model, granularity='name')\n",
+ "config = hls4ml.utils.config_from_keras_model(model, granularity='name', backend='Vitis')\n",
"print(\"-----------------------------------\")\n",
"plotting.print_dict(config)\n",
"print(\"-----------------------------------\")"
@@ -167,7 +167,7 @@
"for layer in config['LayerName'].keys():\n",
" config['LayerName'][layer]['Trace'] = True\n",
"hls_model = hls4ml.converters.convert_from_keras_model(\n",
- " model, hls_config=config, output_dir='model_1/hls4ml_prj_2', part='xcu250-figd2104-2L-e'\n",
+ " model, hls_config=config, backend='Vitis', output_dir='model_1/hls4ml_prj_2', part='xcu250-figd2104-2L-e'\n",
")"
]
},
@@ -286,14 +286,14 @@
"metadata": {},
"outputs": [],
"source": [
- "config = hls4ml.utils.config_from_keras_model(model, granularity='Model')\n",
+ "config = hls4ml.utils.config_from_keras_model(model, granularity='Model', backend='Vitis')\n",
"print(\"-----------------------------------\")\n",
"print(config)\n",
"print(\"-----------------------------------\")\n",
"# Set the ReuseFactor to 2 throughout\n",
"config['Model']['ReuseFactor'] = 2\n",
"hls_model = hls4ml.converters.convert_from_keras_model(\n",
- " model, hls_config=config, output_dir='model_1/hls4ml_prj_2', part='xcu250-figd2104-2L-e'\n",
+    "    model, hls_config=config, backend='Vitis', output_dir='model_1/hls4ml_prj_2', part='xcu250-figd2104-2L-e'\n",
")\n",
"hls_model.compile()\n",
"y_hls = hls_model.predict(X_test)\n",
@@ -315,7 +315,7 @@
"\n",
"While the C-Synthesis is running, we can monitor the progress looking at the log file by opening a terminal from the notebook home, and executing:\n",
"\n",
- "`tail -f model_1/hls4ml_prj_2/vivado_hls.log`"
+ "`tail -f model_1/hls4ml_prj_2/vitis_hls.log`"
]
},
{
@@ -364,7 +364,7 @@
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -378,7 +378,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.8.6"
+ "version": "3.10.16"
}
},
"nbformat": 4,
diff --git a/part3_compression.ipynb b/part3_compression.ipynb
index b751f0e1..368882bd 100644
--- a/part3_compression.ipynb
+++ b/part3_compression.ipynb
@@ -28,7 +28,7 @@
"tf.random.set_seed(seed)\n",
"import os\n",
"\n",
- "os.environ['PATH'] = os.environ['XILINX_VIVADO'] + '/bin:' + os.environ['PATH']"
+ "os.environ['PATH'] = os.environ['XILINX_VITIS'] + '/bin:' + os.environ['PATH']"
]
},
{
@@ -146,7 +146,7 @@
" X_train_val,\n",
" y_train_val,\n",
" batch_size=1024,\n",
- " epochs=30,\n",
+ " epochs=10,\n",
" validation_split=0.25,\n",
" shuffle=True,\n",
" callbacks=callbacks.callbacks,\n",
@@ -225,7 +225,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -236,7 +235,7 @@
"\n",
"While the C-Synthesis is running, we can monitor the progress looking at the log file by opening a terminal from the notebook home, and executing:\n",
"\n",
- "`tail -f model_2/hls4ml_prj/vivado_hls.log`"
+ "`tail -f model_2/hls4ml_prj/vitis_hls.log`"
]
},
{
@@ -247,10 +246,10 @@
"source": [
"import hls4ml\n",
"\n",
- "config = hls4ml.utils.config_from_keras_model(model, granularity='model')\n",
+ "config = hls4ml.utils.config_from_keras_model(model, granularity='model', backend='Vitis')\n",
"print(config)\n",
"hls_model = hls4ml.converters.convert_from_keras_model(\n",
- " model, hls_config=config, output_dir='model_2/hls4ml_prj', part='xcu250-figd2104-2L-e'\n",
+ " model, hls_config=config, backend='Vitis', output_dir='model_2/hls4ml_prj', part='xcu250-figd2104-2L-e'\n",
")\n",
"hls_model.compile()\n",
"hls_model.build(csim=False)"
@@ -261,7 +260,7 @@
"metadata": {},
"source": [
"## Check the reports\n",
- "Print out the reports generated by Vivado HLS. Pay attention to the Utilization Estimates' section in particular this time."
+    "Print out the reports generated by Vitis HLS. Pay attention to the 'Utilization Estimates' section in particular this time."
]
},
{
@@ -277,7 +276,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Print the report for the model trained in part 1. Remember these models have the same architecture, but the model in this section was trained using the sparsity API from tensorflow_model_optimization. Notice how the resource usage had dramatically reduced (particularly the DSPs). When Vivado HLS notices an operation like `y = 0 * x` it can avoid placing a DSP for that operation. The impact of this is biggest when `ReuseFactor = 1`, but still applies at higher reuse as well. **Note you need to have trained and synthesized the model from part 1**"
+    "Print the report for the model trained in part 1. Remember these models have the same architecture, but the model in this section was trained using the sparsity API from tensorflow_model_optimization. Notice how the resource usage has been dramatically reduced (particularly the DSPs). When Vitis HLS notices an operation like `y = 0 * x`, it can avoid placing a DSP for that operation. The impact of this is biggest when `ReuseFactor = 1`, but still applies at higher reuse as well. **Note you need to have trained and synthesized the model from part 1**"
]
},
{
@@ -299,7 +298,7 @@
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -313,7 +312,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.8.6"
+ "version": "3.10.16"
}
},
"nbformat": 4,
diff --git a/part4.1_HG_quantization.ipynb b/part4.1_HG_quantization.ipynb
index d5d3dc16..19a923ab 100644
--- a/part4.1_HG_quantization.ipynb
+++ b/part4.1_HG_quantization.ipynb
@@ -29,7 +29,7 @@
"\n",
"tf.random.set_seed(seed)\n",
"\n",
- "os.environ['PATH'] = os.environ['XILINX_VIVADO'] + '/bin:' + os.environ['PATH']"
+ "os.environ['PATH'] = os.environ['XILINX_VITIS'] + '/bin:' + os.environ['PATH']"
]
},
{
@@ -150,7 +150,7 @@
},
{
"cell_type": "code",
- "execution_count": 29,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -380,7 +380,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -391,30 +390,7 @@
"\n",
"While the C-Synthesis is running, we can monitor the progress looking at the log file by opening a terminal from the notebook home, and executing:\n",
"\n",
- "`tail -f model_3.1/hls4ml_prj/vivado_hls.log`"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Notice\n",
- "\n",
- "- For `vivado_hls`, adding a inline recursive pragma can greatly reduce the latency of the model (up to 50% for HGQ models). You can comment this cell out for comparison.\n",
- "- For `vitis`, pipelining and inlining cannot co-exist, this comment out this cell if you want to use `vitis`."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "with open('model_3.1/hls4ml_prj/firmware/myproject.cpp', 'r+') as f:\n",
- " code = f.read()\n",
- " f.seek(0)\n",
- " code = code.replace('#pragma HLS PIPELINE', '#pragma HLS PIPELINE\\n #pragma HLS INLINE RECURSIVE', 1)\n",
- " f.write(code)"
+ "`tail -f model_3.1/hls4ml_prj/vitis_hls.log`"
]
},
{
@@ -431,7 +407,7 @@
"metadata": {},
"source": [
"## Check the reports\n",
- "Print out the reports generated by Vivado HLS. Pay attention to the Utilization Estimates' section in particular this time.\n",
+    "Print out the reports generated by Vitis HLS. Pay attention to the 'Utilization Estimates' section in particular this time.\n",
"\n",
"## Notice\n",
"We strip away the softmax layer compare to part 4, which takes 3~5 cycles to compute. The overall latency could be comparable."
@@ -469,7 +445,7 @@
"metadata": {},
"source": [
"## NB\n",
- "Note as well that the Vivado HLS `csynth` resource estimates tend to _overestimate_ on chip resource usage. Running the subsequent stages of FPGA compilation reveals the more realistic resource usage, You can run the next step, 'logic synthesis' with `hls_model.build(synth=True, vsynth=True)`, but we skipped it in this tutorial in the interest of time."
+    "Note as well that the Vitis HLS `csynth` resource estimates tend to _overestimate_ on-chip resource usage. Running the subsequent stages of FPGA compilation reveals the more realistic resource usage. You can run the next step, 'logic synthesis', with `hls_model.build(synth=True, vsynth=True)`, but we skipped it in this tutorial in the interest of time."
]
},
{
@@ -496,7 +472,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.11.7"
+ "version": "3.10.16"
}
},
"nbformat": 4,
diff --git a/part4_quantization.ipynb b/part4_quantization.ipynb
index 7c63958b..80f64c57 100644
--- a/part4_quantization.ipynb
+++ b/part4_quantization.ipynb
@@ -28,7 +28,7 @@
"tf.random.set_seed(seed)\n",
"import os\n",
"\n",
- "os.environ['PATH'] = os.environ['XILINX_VIVADO'] + '/bin:' + os.environ['PATH']"
+ "os.environ['PATH'] = os.environ['XILINX_VITIS'] + '/bin:' + os.environ['PATH']"
]
},
{
@@ -239,14 +239,14 @@
"import hls4ml\n",
"import plotting\n",
"\n",
- "config = hls4ml.utils.config_from_keras_model(model, granularity='name')\n",
+ "config = hls4ml.utils.config_from_keras_model(model, granularity='name', backend='Vitis')\n",
"config['LayerName']['softmax']['exp_table_t'] = 'ap_fixed<18,8>'\n",
"config['LayerName']['softmax']['inv_table_t'] = 'ap_fixed<18,4>'\n",
"print(\"-----------------------------------\")\n",
"plotting.print_dict(config)\n",
"print(\"-----------------------------------\")\n",
"hls_model = hls4ml.converters.convert_from_keras_model(\n",
- " model, hls_config=config, output_dir='model_3/hls4ml_prj', part='xcu250-figd2104-2L-e'\n",
+ " model, hls_config=config, backend='Vitis', output_dir='model_3/hls4ml_prj', part='xcu250-figd2104-2L-e'\n",
")\n",
"hls_model.compile()\n",
"\n",
@@ -290,7 +290,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -301,7 +300,7 @@
"\n",
"While the C-Synthesis is running, we can monitor the progress looking at the log file by opening a terminal from the notebook home, and executing:\n",
"\n",
- "`tail -f model_3/hls4ml_prj/vivado_hls.log`"
+ "`tail -f model_3/hls4ml_prj/vitis_hls.log`"
]
},
{
@@ -318,7 +317,7 @@
"metadata": {},
"source": [
"## Check the reports\n",
- "Print out the reports generated by Vivado HLS. Pay attention to the Utilization Estimates' section in particular this time."
+    "Print out the reports generated by Vitis HLS. Pay attention to the 'Utilization Estimates' section in particular this time."
]
},
{
@@ -352,7 +351,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Print the report for the model trained in part 3. Both these models were trained with 75% sparsity, but the new model uses 6-bit precision as well. You can see how Vivado HLS has moved multiplication operations from DSPs into LUTs, reducing the \"critical\" resource usage.\n",
+ "Print the report for the model trained in part 3. Both these models were trained with 75% sparsity, but the new model uses 6-bit precision as well. You can see how Vitis HLS has moved multiplication operations from DSPs into LUTs, reducing the \"critical\" resource usage.\n",
"\n",
"**Note you need to have trained and synthesized the model from part 3**"
]
@@ -371,7 +370,7 @@
"metadata": {},
"source": [
"## NB\n",
- "Note as well that the Vivado HLS resource estimates tend to _overestimate_ LUTs, while generally estimating the DSPs correctly. Running the subsequent stages of FPGA compilation reveals the more realistic resource usage, You can run the next step, 'logic synthesis' with `hls_model.build(synth=True, vsynth=True)`, but we skipped it in this tutorial in the interest of time."
+    "Note as well that the Vitis HLS resource estimates tend to _overestimate_ LUTs, while generally estimating the DSPs correctly. Running the subsequent stages of FPGA compilation reveals the more realistic resource usage. You can run the next step, 'logic synthesis', with `hls_model.build(synth=True, vsynth=True)`, but we skipped it in this tutorial in the interest of time."
]
},
{
@@ -398,7 +397,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.13"
+ "version": "3.10.16"
}
},
"nbformat": 4,
diff --git a/part5_bdt.ipynb b/part5_bdt.ipynb
index 9b4640d7..b486e65f 100644
--- a/part5_bdt.ipynb
+++ b/part5_bdt.ipynb
@@ -1,69 +1,82 @@
{
"cells": [
{
- "attachments": {},
"cell_type": "markdown",
- "id": "209d2b58",
"metadata": {},
"source": [
- "# Part 5: Boosted Decision Trees\n",
+    "# Part 5: Boosted Decision Trees\n",
"\n",
- "The `conifer` package was created out of `hls4ml`, providing a similar set of features but specifically targeting inference of Boosted Decision Trees. In this notebook we will train a `GradientBoostingClassifier` with scikit-learn, using the same jet tagging dataset as in the other tutorial notebooks. Then we will convert the model using `conifer`, and run bit-accurate prediction and synthesis as we did with `hls4ml` before.\n",
+    "In this notebook we will take the first steps with training a BDT with `xgboost`, then translating it to HLS code for FPGA with `conifer`.\n",
"\n",
- "`conifer` is available from GitHub [here](https://github.com/thesps/conifer), and we have a publication describing the inference implementation and performance in detail [here](https://iopscience.iop.org/article/10.1088/1748-0221/15/05/P05026/pdf).\n",
+ "Key concepts:\n",
+ "- model training\n",
+ "- model evaluation\n",
+ "- `conifer` configuration and conversion\n",
+ "- model emulation\n",
+ "- model synthesis\n",
+ "- accelerator creation\n",
"\n",
- ""
+ "For some use cases, the Forest Processing Unit might be an easier entry point as no FPGA synthesis is required for supported boards. Read more about the FPU here: https://ssummers.web.cern.ch/conifer/fpu.html"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "eda9b784",
"metadata": {},
"outputs": [],
"source": [
+ "import xgboost as xgb\n",
+ "import matplotlib.pyplot as plt\n",
+ "import plotting\n",
"import numpy as np\n",
- "from sklearn.ensemble import GradientBoostingClassifier\n",
+ "from scipy.special import softmax\n",
"from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n",
- "from sklearn.metrics import accuracy_score\n",
- "import joblib\n",
"import conifer\n",
- "import plotting\n",
- "import matplotlib.pyplot as plt\n",
+ "import json\n",
"import os\n",
+ "import sys\n",
+ "\n",
+ "os.environ['PATH'] = os.environ['XILINX_VITIS'] + '/bin:' + os.environ['PATH']\n",
+ "\n",
+ "# enable more output from conifer\n",
+ "import logging\n",
+ "\n",
+ "logging.basicConfig(stream=sys.stdout, level=logging.WARNING)\n",
+ "logger = logging.getLogger('conifer')\n",
+ "logger.setLevel('DEBUG')\n",
+ "\n",
+    "# create a random seed that we use to make the results repeatable\n",
+ "seed = int('hls4ml-tutorial'.encode('utf-8').hex(), 16) % 2**31\n",
"\n",
- "os.environ['PATH'] = os.environ['XILINX_VIVADO'] + '/bin:' + os.environ['PATH']\n",
- "np.random.seed(0)"
+ "print(f'Using conifer version {conifer.__version__}')"
]
},
{
- "attachments": {},
"cell_type": "markdown",
- "id": "18354699",
"metadata": {},
"source": [
- "## Load the dataset\n",
- "Note you need to have gone through `part1_getting_started` to download the data."
+ "# Load dataset\n",
+ "\n",
+ "Load the jet tagging dataset.\n",
+ "\n",
+ "**Note**: you need to run part1 first."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "1574ed18",
"metadata": {},
"outputs": [],
"source": [
"X_train_val = np.load('X_train_val.npy')\n",
"X_test = np.load('X_test.npy')\n",
- "y_train_val = np.load('y_train_val.npy')\n",
- "y_test = np.load('y_test.npy', allow_pickle=True)\n",
+ "y_train_val_one_hot = np.load('y_train_val.npy')\n",
+ "y_test_one_hot = np.load('y_test.npy')\n",
"classes = np.load('classes.npy', allow_pickle=True)"
]
},
{
- "attachments": {},
"cell_type": "markdown",
- "id": "24658fb4",
"metadata": {},
"source": [
"We need to transform the test labels from the one-hot encoded values to labels"
@@ -72,219 +85,425 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "00f304bd",
"metadata": {},
"outputs": [],
"source": [
"le = LabelEncoder().fit(classes)\n",
"ohe = OneHotEncoder().fit(le.transform(classes).reshape(-1, 1))\n",
- "y_train_val = ohe.inverse_transform(y_train_val.astype(int))\n",
- "y_test = ohe.inverse_transform(y_test)"
+ "y_train_val = ohe.inverse_transform(y_train_val_one_hot.astype(int))\n",
+ "y_test = ohe.inverse_transform(y_test_one_hot)"
]
},
{
- "attachments": {},
"cell_type": "markdown",
- "id": "8305e22c",
"metadata": {},
"source": [
- "## Train a `GradientBoostingClassifier`\n",
- "We will use 20 estimators with a maximum depth of 3. The number of decision trees will be `n_estimators * n_classes`, so 100 for this dataset. If you are returning to this notebook having already trained the BDT once, set `train = False` to load the model rather than retrain."
+ "# Train a BDT\n",
+ "We'll use `xgboost`'s `XGBClassifier` with:\n",
+ "\n",
+ "| Parameter | Explanation |\n",
+ "| --- | --- |\n",
+ "| `n_estimators=25` | 25 trees |\n",
+ "| `max_depth=5` | maximum tree depth of 5 |"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "f5044231",
"metadata": {},
"outputs": [],
"source": [
- "train = True\n",
- "if train:\n",
- " clf = GradientBoostingClassifier(n_estimators=20, learning_rate=1.0, max_depth=3, random_state=0, verbose=1).fit(\n",
- " X_train_val, y_train_val.ravel()\n",
- " )\n",
- " if not os.path.exists('model_5'):\n",
- " os.makedirs('model_5')\n",
- " joblib.dump(clf, 'model_5/bdt.joblib')\n",
- "else:\n",
- " clf = joblib.load('model_5/bdt.joblib')"
+ "clf = xgb.XGBClassifier(n_estimators=25, max_depth=5, learning_rate=1.0, random_state=seed).fit(X_train_val, y_train_val)"
]
},
{
- "attachments": {},
"cell_type": "markdown",
- "id": "5e9857c2",
"metadata": {},
"source": [
- "## Create a conifer configuration\n",
+ "# Validate performance\n",
+ "Now we check whether the trained model is any good. We'll plot the ROC curve."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from sklearn.metrics import accuracy_score\n",
+ "from tensorflow.keras.models import load_model\n",
+ "\n",
+ "# load the KERAS model from part 1\n",
+ "model_ref = load_model('model_1/KERAS_check_best_model.h5')\n",
+ "y_ref = model_ref.predict(X_test)\n",
+ "\n",
+ "# compute predictions of the xgboost model\n",
+ "y_xgb = clf.predict_proba(X_test)\n",
+ "print(f'Accuracy baseline: {accuracy_score(np.argmax(y_test_one_hot, axis=1), np.argmax(y_ref, axis=1)):.5f}')\n",
+ "print(f'Accuracy xgboost: {accuracy_score(np.argmax(y_test_one_hot, axis=1), np.argmax(y_xgb, axis=1)):.5f}')\n",
+ "\n",
+ "fig, ax = plt.subplots(figsize=(9, 9))\n",
+ "_ = plotting.makeRoc(y_test_one_hot, y_ref, classes, linestyle='--')\n",
+ "plt.gca().set_prop_cycle(None) # reset the colors\n",
+ "_ = plotting.makeRoc(y_test_one_hot, y_xgb, classes, linestyle='-')\n",
+ "\n",
+ "# add a legend\n",
+ "from matplotlib.lines import Line2D\n",
"\n",
- "Similarly to `hls4ml`, we can use a utility method to get a template for the configuration dictionary that we can modify."
+ "lines = [\n",
+ " Line2D([0], [0], ls='--'),\n",
+ " Line2D([0], [0], ls='-'),\n",
+ "]\n",
+ "from matplotlib.legend import Legend\n",
+ "\n",
+ "leg = Legend(ax, lines, labels=['part1 Keras', 'xgboost'], loc='lower right', frameon=False)\n",
+ "ax.add_artist(leg)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "\n",
+ "Now we'll convert this model to FPGA firmware with `conifer`. We first need to create a configuration in the form of a dictionary. The quickest way to get started is to create a default configuration from the intended target backend (`xilinxhls` for us). Each backend may have different configuration options, so getting the configuration this way helps enumerate the possible options.\n",
+ "\n",
+ "We will print the configuration, modify it, and print it again. The modifications are:\n",
+    "- set the `OutputDir` to something descriptive\n",
+    "- set the `XilinxPart` to the part number of the FPGA on the Alveo U250"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "5bab868f",
"metadata": {},
"outputs": [],
"source": [
"cfg = conifer.backends.xilinxhls.auto_config()\n",
- "cfg['OutputDir'] = 'model_5/conifer_prj'\n",
+ "\n",
+ "# print the config\n",
+ "print('Default Configuration\\n' + '-' * 50)\n",
+ "plotting.print_dict(cfg)\n",
+ "print('-' * 50)\n",
+ "\n",
+ "# modify the config\n",
+ "cfg['OutputDir'] = 'model_5/'\n",
"cfg['XilinxPart'] = 'xcu250-figd2104-2L-e'\n",
- "plotting.print_dict(cfg)"
+ "\n",
+ "# print the config again\n",
+ "print('Modified Configuration\\n' + '-' * 50)\n",
+ "plotting.print_dict(cfg)\n",
+ "print('-' * 50)"
]
},
{
- "attachments": {},
"cell_type": "markdown",
- "id": "9e3ca740",
"metadata": {},
"source": [
- "## Convert the model\n",
- "The syntax for model conversion with `conifer` is a little different to `hls4ml`. We construct a `conifer.model` object, providing the trained BDT, the converter corresponding to the library we used, the conifer 'backend' that we wish to target, and the configuration.\n",
+ "## Convert and write\n",
+ "Convert the `xgboost` model to a `conifer` one, and print the `help` to see what methods it implements.\n",
+ "Then `write` the model, creating the specified output directory and writing all the HLS files to it. We also save the `xgboost` model itself.\n",
"\n",
- "`conifer` has converters for:\n",
+ "#### Other converters:\n",
+    "`conifer` has converters for several popular BDT training libraries. Each one is used like: `conifer.converters.convert_from_<library>(model, config)`.\n",
+ "The converters are:\n",
"- `sklearn`\n",
"- `xgboost`\n",
+ "- `ydf`\n",
"- `tmva`\n",
+ "- `onnx` (exposing `catboost` and `lightGBM`)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# convert the model to the conifer representation\n",
+ "conifer_model = conifer.converters.convert_from_xgboost(clf, cfg)\n",
+ "# print the help to see the API on the conifer_model\n",
+ "help(conifer_model)\n",
+ "# write the project (writing HLS project to disk)\n",
+ "conifer_model.write()\n",
+    "# save the xgboost model - we can load this again later\n",
+ "clf.save_model('model_5/xgboost_model.json')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Explore\n",
+ "Browse the files in the newly created project directory to take a look at the HLS code.\n",
"\n",
- "And backends:\n",
- "- `vivadohls`\n",
- "- `vitishls`\n",
- "- `xilinxhls` (use whichever `vivado` or `vitis` is on the path\n",
- "- `vhdl`\n",
+ "The output of `!tree model_5` is:\n",
+ "\n",
+ "```\n",
+ "model_5/\n",
+ "├── bridge.cpp\n",
+ "├── build_hls.tcl\n",
+ "├── firmware\n",
+ "│ ├── BDT.cpp\n",
+ "│ ├── BDT.h\n",
+ "│ ├── my_prj.cpp\n",
+ "│ ├── my_prj.h\n",
+ "│ └── parameters.h\n",
+ "├── hls_parameters.tcl\n",
+ "├── my_prj.json\n",
+ "├── my_prj_test.cpp\n",
+ "├── tb_data\n",
+ "└── vivado_synth.tcl\n",
+ "```\n",
+ "\n",
+ "- files under `firmware` are the HLS implementation of the model\n",
+ "- `my_prj.json` is the saved converted `conifer` model that can be loaded again without the original `xgboost` model\n",
+ "- `tcl` scripts are used for synthesizing the project"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Emulate\n",
+ "Before starting the lengthy FPGA build process, we should validate that our conversion was successful and that the choice of precision was suitable with a bit-accurate emulation. To do this we need to run the HLS C++ code on the CPU with some test data first. This is like the HLS C Simulation step, but rather than writing a C++ testbench and invoking `vitis_hls` to run `csim`, `conifer` implements Python bindings for the HLS, just like `hls4ml`.\n",
"\n",
- "Here we will use the `sklearn` converter, since that's how we trained our model, and the `vivadohls` backend. For larger BDTs with many more trees or depth, it may be preferable to generate VHDL directly using the `vhdl` backend to get best performance. See [our paper](https://iopscience.iop.org/article/10.1088/1748-0221/15/05/P05026/pdf) for the performance comparison between those backends."
+    "We first need to compile (which uses the C++ compiler); then we can make predictions."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "7ebf5b06",
"metadata": {},
"outputs": [],
"source": [
- "cnf = conifer.model(clf, conifer.converters.sklearn, conifer.backends.vivadohls, cfg)\n",
- "cnf.compile()"
+ "conifer_model.compile()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "y_hls = conifer_model.decision_function(X_test)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Compare\n",
+ "\n",
+ "Now we check whether the emulated predictions are good. To do this we'll plot the ROC curve again with the HLS predictions overlaid."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "y_hls_proba = softmax(y_hls, axis=1)  # compute per-sample class probabilities from the raw predictions\n",
+ "\n",
+ "print(f'Accuracy baseline: {accuracy_score(np.argmax(y_test_one_hot, axis=1), np.argmax(y_ref, axis=1)):.5f}')\n",
+ "print(f'Accuracy xgboost: {accuracy_score(np.argmax(y_test_one_hot, axis=1), np.argmax(y_xgb, axis=1)):.5f}')\n",
+ "print(f'Accuracy conifer: {accuracy_score(np.argmax(y_test_one_hot, axis=1), np.argmax(y_hls_proba, axis=1)):.5f}')\n",
+ "\n",
+ "\n",
+ "fig, ax = plt.subplots(figsize=(9, 9))\n",
+ "_ = plotting.makeRoc(y_test_one_hot, y_ref, classes, linestyle='--')\n",
+ "plt.gca().set_prop_cycle(None) # reset the colors\n",
+ "_ = plotting.makeRoc(y_test_one_hot, y_xgb, classes, linestyle=':')\n",
+ "plt.gca().set_prop_cycle(None) # reset the colors\n",
+ "_ = plotting.makeRoc(y_test_one_hot, y_hls_proba, classes, linestyle='-')\n",
+ "\n",
+ "# add a legend\n",
+ "from matplotlib.lines import Line2D\n",
+ "\n",
+ "lines = [\n",
+ " Line2D([0], [0], ls='--'),\n",
+ " Line2D([0], [0], ls=':'),\n",
+ " Line2D([0], [0], ls='-'),\n",
+ "]\n",
+ "from matplotlib.legend import Legend\n",
+ "\n",
+ "leg = Legend(ax, lines, labels=['part1 Keras', 'xgboost', 'conifer'], loc='lower right', frameon=False)\n",
+ "ax.add_artist(leg)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Build\n",
+    "Now we'll run the Vitis HLS and Vivado synthesis. HLS C Synthesis compiles our C++ to RTL, performing scheduling and resource mapping. Vivado synthesis synthesizes the RTL from the previous step into a netlist, and produces a more realistic resource estimation. The latency can't change during Vivado synthesis; it's fixed in the RTL description.\n",
+ "\n",
+ "After the build completes we can also browse the new log files and reports that are generated.\n",
+ "\n",
+ "**Warning**: this step might take around 10 minutes"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "conifer_model.build(synth=True, vsynth=True)"
]
},
{
- "attachments": {},
"cell_type": "markdown",
- "id": "dc5e487b",
"metadata": {},
"source": [
- "## profile\n",
- "Similarly to hls4ml, we can visualize the distribution of the parameters of the BDT to guide the choice of precision"
+ "## Report\n",
+    "If the synthesis completed successfully, we can extract the key metrics from the reports and print them out.\n",
+    "The `\"vsynth\"` section contains the report from the Vivado RTL synthesis, whose resource figures are usually lower and more realistic than the HLS estimates."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "993fef56",
"metadata": {},
"outputs": [],
"source": [
- "cnf.profile()"
+ "report = conifer_model.read_report()\n",
+ "plotting.print_dict(report)"
]
},
{
- "attachments": {},
"cell_type": "markdown",
- "id": "9c840ca4",
"metadata": {},
"source": [
- "## Run inference\n",
- "Now we can execute the BDT inference with `sklearn`, and also the bit exact simulation using Vivado HLS. The output that the `conifer` BDT produces is equivalent to the `decision_function` method."
+ "## Deployment with `pynq`\n",
+ "\n",
+ "There are two main ways to deploy a BDT to an accelerator card with `conifer`:\n",
+ "- build a static accelerator with Xilinx HLS backend\n",
+ "- use the dynamic accelerator Forest Processing Unit (FPU)\n",
+ "\n",
+ "Getting started with the FPU is straightforward. For a supported board, you will need only the converted model JSON, and a bitfile that can be downloaded from the conifer website. Read more about the FPU here: https://ssummers.web.cern.ch/conifer/fpu.html\n",
+ "\n",
+ "However, without a physical device there's not much to show, so in this section we'll see how to deploy the model that we already trained as a static accelerator to a `pynq-z2` board.\n",
+ "We'll use the `AcceleratorConfig` part of the configuration that we previously left undefined."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "b9fd0fee",
"metadata": {},
"outputs": [],
"source": [
- "y_skl = clf.decision_function(X_test)\n",
- "y_cnf = cnf.decision_function(X_test)"
+ "pynq_model_cfg = conifer.backends.xilinxhls.auto_config()\n",
+ "pynq_model_cfg['OutputDir'] = 'model_5_pynq' # choose a new project directory\n",
+ "pynq_model_cfg['ProjectName'] = 'conifer_jettag'\n",
+ "pynq_model_cfg['AcceleratorConfig'] = {\n",
+ " 'Board': 'pynq-z2', # choose a pynq-z2 board\n",
+ " 'InterfaceType': 'float', # floating point for the data I/O (this is default)\n",
+ "}\n",
+ "\n",
+ "# print the config\n",
+ "print('Modified Configuration\\n' + '-' * 50)\n",
+ "print(json.dumps(pynq_model_cfg, indent=2))\n",
+ "print('-' * 50)"
]
},
{
- "attachments": {},
"cell_type": "markdown",
- "id": "c486535e",
"metadata": {},
"source": [
- "## Check performance\n",
+ "## Supported boards\n",
"\n",
- "Print the accuracy from `sklearn` and `conifer` evaluations, and plot the ROC curves. We should see that we can get quite close to the accuracy of the Neural Networks from parts 1-4."
+ "Here we print the list of supported boards, so you can see what else works \"out of the box\". It's relatively easy to add other Zynq SoC or Alveo boards, for example to add an Alveo U50 card targeting `xilinx_u50_gen3x16_xdma_5_202210_1` platform:\n",
+ "\n",
+ "```\n",
+ "u50 = conifer.backends.boards.AlveoConfig.default_config()\n",
+ "u50['xilinx_part'] = 'xcu50-fsvh2104-2-e'\n",
+ "u50['platform'] = 'xilinx_u50_gen3x16_xdma_5_202210_1'\n",
+ "u50['name'] = 'xilinx_u50_gen3x16_xdma_5_202210_1'\n",
+ "u50 = conifer.backends.boards.AlveoConfig(u50)\n",
+ "conifer.backends.boards.register_board_config(u50.name, u50)\n",
+ "```"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "3a87c1b8",
"metadata": {},
"outputs": [],
"source": [
- "yt = ohe.transform(y_test).toarray().astype(int)\n",
- "print(\"Accuracy sklearn: {}\".format(accuracy_score(np.argmax(yt, axis=1), np.argmax(y_skl, axis=1))))\n",
- "print(\"Accuracy conifer: {}\".format(accuracy_score(np.argmax(yt, axis=1), np.argmax(y_cnf, axis=1))))\n",
- "fig, ax = plt.subplots(figsize=(9, 9))\n",
- "_ = plotting.makeRoc(yt, y_skl, classes)\n",
- "plt.gca().set_prop_cycle(None) # reset the colors\n",
- "_ = plotting.makeRoc(yt, y_cnf, classes, linestyle='--')"
+ "# This is the full list of supported boards:\n",
+ "print(f'Supported boards: {conifer.backends.boards.get_available_boards()}')"
]
},
{
- "attachments": {},
"cell_type": "markdown",
- "id": "70c43d82",
"metadata": {},
"source": [
- "## Synthesize\n",
- "Now run the Vivado HLS C Synthesis step to produce an IP that we can use, and inspect the estimate resources and latency.\n",
- "You can see some live output while the synthesis is running by opening a terminal from the Jupyter home page and executing:\n",
- "`tail -f model_5/conifer_prj/vivado_hls.log`"
+ "### Load the model\n",
+ "\n",
+ "We load the JSON for the conifer model we previously used, applying the new configuration just defined. We'll see that the FPGA part specified by the board overrides the `XilinxPart` specified in the default."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "721814ef",
"metadata": {},
"outputs": [],
"source": [
- "cnf.build()"
+ "pynq_model = conifer.model.load_model('model_5/my_prj.json', new_config=pynq_model_cfg)\n",
+ "pynq_model.write()"
]
},
{
- "attachments": {},
"cell_type": "markdown",
- "id": "ad1efe07",
"metadata": {},
"source": [
- "## Read report\n",
- "We can use an hls4ml utility to read the Vivado report"
+ "## Build the model\n",
+ "\n",
+ "Now we run `build` again, running HLS Synthesis, Logic Synthesis and Place & Route, finally producing a bitfile and an archive of files that we'll need to run inference on the pynq-z2 board. \n",
+ "\n",
+ "**Warning**: this step might take around 20 minutes to complete.\n",
+ "\n",
+    "The floorplan of the bitfile should look something like this, where the individual tree modules are highlighted in different colours:\n",
+ "\n",
+    "![part5 floorplan](images/part5_floorplan.png)"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "578a62c3",
"metadata": {},
"outputs": [],
"source": [
- "import hls4ml\n",
+ "pynq_model.build(synth=True, bitfile=True, package=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Inference on pynq-z2\n",
"\n",
- "hls4ml.report.read_vivado_report('model_5/conifer_prj/')"
+ "Running inference on the `pynq-z2` would look like this:\n",
+    "- download the `model_5_pynq/conifer_jettag.zip` archive from this notebook\n",
+ "- upload `conifer_jettag.zip` to the pynq-z2 device and unzip it\n",
+ "- start a jupyter notebook on the `pynq-z2` and run the following code:\n",
+ "\n",
+ "```\n",
+ "import conifer\n",
+ "accelerator = conifer.backends.xilinxhls.runtime.ZynqDriver('conifer_jettag.bit', batch_size=1)\n",
+ "X = ... # load some data \n",
+ "y_pynq = accelerator.decision_function(X)\n",
+ "```\n"
]
}
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -298,9 +517,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.7.10"
+   "version": "3.10.16"
}
},
"nbformat": 4,
- "nbformat_minor": 5
+ "nbformat_minor": 4
}
diff --git a/part6_cnns.ipynb b/part6_cnns.ipynb
index a069a712..7beb4bbb 100644
--- a/part6_cnns.ipynb
+++ b/part6_cnns.ipynb
@@ -1,7 +1,6 @@
{
"cells": [
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -24,7 +23,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -44,11 +42,12 @@
"import numpy as np\n",
"import time\n",
"import tensorflow.compat.v2 as tf\n",
- "import tensorflow_datasets as tfds"
+ "import tensorflow_datasets as tfds\n",
+ "\n",
+ "os.environ['PATH'] = os.environ['XILINX_VITIS'] + '/bin:' + os.environ['PATH']"
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -80,7 +79,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -129,7 +127,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -193,7 +190,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -219,7 +215,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -265,7 +260,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -318,7 +312,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -329,7 +322,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -401,7 +393,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -460,7 +451,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -560,7 +550,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -608,7 +597,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -620,7 +608,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -655,7 +642,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -684,36 +670,23 @@
"import plotting\n",
"\n",
"# First, the baseline model\n",
- "hls_config = hls4ml.utils.config_from_keras_model(model, granularity='name')\n",
+ "hls_config = hls4ml.utils.config_from_keras_model(\n",
+ " model, granularity='name', backend='Vitis', default_precision='ap_fixed<16,6>'\n",
+ ")\n",
"\n",
- "# Set the precision and reuse factor for the full model\n",
- "hls_config['Model']['Precision'] = 'ap_fixed<16,6>'\n",
- "hls_config['Model']['ReuseFactor'] = 1\n",
- "\n",
- "# Create an entry for each layer, here you can for instance change the strategy for a layer to 'resource'\n",
- "# or increase the reuse factor individually for large layers.\n",
- "# In this case, we designed the model to be small enough for a fully parallel implementation\n",
- "# so we use the latency strategy and reuse factor of 1 for all layers.\n",
- "for Layer in hls_config['LayerName'].keys():\n",
- " hls_config['LayerName'][Layer]['Strategy'] = 'Latency'\n",
- " hls_config['LayerName'][Layer]['ReuseFactor'] = 1\n",
- "# If you want best numerical performance for high-accuray models, while the default latency strategy is faster but numerically more unstable\n",
- "hls_config['LayerName']['output_softmax']['Strategy'] = 'Stable'\n",
"plotting.print_dict(hls_config)\n",
"\n",
- "cfg = hls4ml.converters.create_config(backend='Vivado')\n",
- "cfg['IOType'] = 'io_stream' # Must set this if using CNNs!\n",
- "cfg['HLSConfig'] = hls_config\n",
- "cfg['KerasModel'] = model\n",
- "cfg['OutputDir'] = 'pruned_cnn/'\n",
- "cfg['XilinxPart'] = 'xcu250-figd2104-2L-e'\n",
"\n",
- "hls_model = hls4ml.converters.keras_to_hls(cfg)\n",
+    "# io_type='io_stream' is required when converting CNNs\n",
+    "hls_model = hls4ml.converters.convert_from_keras_model(\n",
+    "    model, hls_config=hls_config, backend='Vitis', io_type='io_stream', output_dir='pruned_cnn/', part='xcu250-figd2104-2L-e'\n",
+    ")\n",
"hls_model.compile()"
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -742,7 +715,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -759,13 +731,13 @@
"outputs": [],
"source": [
"# Then the QKeras model\n",
- "hls_config_q = hls4ml.utils.config_from_keras_model(qmodel, granularity='name')\n",
+ "hls_config_q = hls4ml.utils.config_from_keras_model(qmodel, granularity='name', backend='Vitis')\n",
"hls_config_q['Model']['ReuseFactor'] = 1\n",
"hls_config['Model']['Precision'] = 'ap_fixed<16,6>'\n",
"hls_config_q['LayerName']['output_softmax']['Strategy'] = 'Stable'\n",
"plotting.print_dict(hls_config_q)\n",
"\n",
- "cfg_q = hls4ml.converters.create_config(backend='Vivado')\n",
+ "cfg_q = hls4ml.converters.create_config(backend='Vitis')\n",
"cfg_q['IOType'] = 'io_stream' # Must set this if using CNNs!\n",
"cfg_q['HLSConfig'] = hls_config_q\n",
"cfg_q['KerasModel'] = qmodel\n",
@@ -777,7 +749,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -798,7 +769,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -809,7 +779,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -892,7 +861,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -912,7 +880,7 @@
"source": [
"import os\n",
"\n",
- "os.environ['PATH'] = os.environ['XILINX_VIVADO'] + '/bin:' + os.environ['PATH']\n",
+    "os.environ['PATH'] = '/opt/Xilinx/Vitis_HLS/2024.1/bin:' + os.environ['PATH']\n",
"\n",
"synth = False # Only if you want to synthesize the models yourself (>1h per model) rather than look at the provided reports.\n",
"if synth:\n",
@@ -921,7 +889,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -988,7 +955,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -1008,7 +974,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {
"deletable": false,
@@ -1115,7 +1080,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -1264,7 +1228,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -1312,7 +1275,6 @@
]
},
{
- "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -1358,7 +1320,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.16"
+ "version": "3.10.16"
}
},
"nbformat": 4,
diff --git a/part7a_bitstream.ipynb b/part7a_bitstream.ipynb
index 718b07a1..1aa91da8 100644
--- a/part7a_bitstream.ipynb
+++ b/part7a_bitstream.ipynb
@@ -26,7 +26,7 @@
"_add_supported_quantized_objects(co)\n",
"import os\n",
"\n",
- "os.environ['PATH'] = os.environ['XILINX_VIVADO'] + '/bin:' + os.environ['PATH']"
+    "os.environ['PATH'] = os.environ['XILINX_VIVADO'] + '/bin:' + os.environ['PATH']"
]
},
{
@@ -74,7 +74,7 @@
"import hls4ml\n",
"import plotting\n",
"\n",
- "config = hls4ml.utils.config_from_keras_model(model, granularity='name')\n",
+ "config = hls4ml.utils.config_from_keras_model(model, granularity='name', backend='Vitis')\n",
"config['LayerName']['softmax']['exp_table_t'] = 'ap_fixed<18,8>'\n",
"config['LayerName']['softmax']['inv_table_t'] = 'ap_fixed<18,4>'\n",
"for layer in ['fc1', 'fc2', 'fc3', 'output']:\n",
@@ -282,7 +282,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.16"
+ "version": "3.10.16"
}
},
"nbformat": 4,
diff --git a/part7b_deployment.ipynb b/part7b_deployment.ipynb
index d26a8112..43760018 100644
--- a/part7b_deployment.ipynb
+++ b/part7b_deployment.ipynb
@@ -112,7 +112,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.16"
+ "version": "3.10.16"
}
},
"nbformat": 4,
diff --git a/part7c_validation.ipynb b/part7c_validation.ipynb
index 304b95ef..e3745d13 100644
--- a/part7c_validation.ipynb
+++ b/part7c_validation.ipynb
@@ -72,7 +72,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.16"
+ "version": "3.10.16"
}
},
"nbformat": 4,
diff --git a/part8_symbolic_regression.ipynb b/part8_symbolic_regression.ipynb
index af93c1a3..2f6a98f7 100644
--- a/part8_symbolic_regression.ipynb
+++ b/part8_symbolic_regression.ipynb
@@ -309,9 +309,7 @@
"cell_type": "code",
"execution_count": null,
"id": "46ff4b5e",
- "metadata": {
- "scrolled": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"# Use hls4ml to convert sympy expressions into HLS model\n",
@@ -472,8 +470,8 @@
"metadata": {},
"outputs": [],
"source": [
- "!source ${XILINX_VIVADO}/settings64.sh\n",
- "!vivado_hls -f build_prj.tcl \"reset=1 synth=1 csim=0 cosim=0 validation=0 export=0 vsynth=0\"\n",
+ "!source ${XILINX_VITIS}/settings64.sh\n",
+ "!vitis_hls -f build_prj.tcl \"reset=1 synth=1 csim=0 cosim=0 validation=0 export=0 vsynth=0\"\n",
"!cat my-hls-test/myproject_prj/solution1/syn/report/myproject_csynth.rpt"
]
}
@@ -494,7 +492,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.12"
+ "version": "3.10.16"
}
},
"nbformat": 4,