# Light client benchmark #5
# Workflow file for this run

# Runs benchmarks on self-hosted infra via `workflow_dispatch`
# This trigger can be found at https://github.com/wormhole-foundation/example-zk-light-clients-internal/actions/workflows/bench.yml
#
# The output can be found in the logs or in a comment on the latest commit. This can be viewed on GitHub at the bottom of the commit page.
# See https://github.com/wormhole-foundation/example-zk-light-clients-internal/commit/3d06c3585e94fe027bf7dacf865106c259994c85#comments
name: Manual benchmark

on:
  workflow_dispatch:
    inputs:
      # Name of the `aptos/light-client` benchmark to run
      bench-name:
        type: string
        required: true
      # Optionally send a message to the below Zulip streams
      zulip:
        type: boolean
        required: false
        default: false
      # User(s) to whom to send a private DM (optional)
      # Comma-separated list of user ID integers, e.g. `11,12` (IDs can be found in user profiles)
      # If not specified, sends to a stream/topic pair instead
      private:
        description: 'DM given user ID(s)'
        type: string
        required: false
      # Zulip stream in which to send the message (optional)
      # Ignored if `private` input is specified
      # Defaults to `light_client` stream
      stream:
        type: string
        required: false
      # Zulip topic in which to send the message (optional)
      # Ignored if `private` input is specified
      # Defaults to `chat`
      topic:
        type: string
        required: false
  schedule:
    # Bench report every Thursday at 16:00 UTC (11am EST / 12pm EDT)
    # NOTE(review): scheduled runs carry no `inputs`, so `bench-name` expands
    # empty for this trigger — confirm `make bench-ci BENCH=` behaves sensibly.
    - cron: '0 16 * * 4'

# One run per workflow at a time; `head_ref` is empty for dispatch/schedule
# triggers, so each run falls back to its unique `run_id` group.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  benchmark:
    name: Manual benchmark
    runs-on: [self-hosted, bench, avx512]
    env:
      CARGO_NET_GIT_FETCH_WITH_CLI: "true"
    steps:
      # Fetch the shared CI workflows first so their composite actions exist locally
      - uses: actions/checkout@v4
        with:
          repository: lurk-lab/ci-workflows
      - uses: ./.github/actions/ci-env
      # Then check out this repository into the workspace
      - uses: actions/checkout@v4
      - name: Setup CI
        uses: ./.github/actions/setup
        with:
          pull_token: ${{ secrets.REPO_TOKEN }}
      - uses: dtolnay/rust-toolchain@nightly
      - name: Install extra deps
        run: |
          sudo apt-get update && sudo apt-get install -y python3-pip
          pip3 install jtbl
      # Resolve the Zulip destination (private DM vs. stream/topic) into env vars
      # consumed by the "Send report to Zulip" step. Inputs are passed through
      # `env:` rather than interpolated into the script body so user-controlled
      # values cannot inject shell commands.
      - name: Parse inputs
        env:
          ZULIP: ${{ inputs.zulip }}
          PRIVATE_IDS: ${{ inputs.private }}
          IN_STREAM: ${{ inputs.stream }}
          IN_TOPIC: ${{ inputs.topic }}
        run: |
          if [[ "$ZULIP" == "true" ]]; then
            if [[ -n "$PRIVATE_IDS" ]]; then
              TYPE="private"
              # Stream = private DM
              STREAM="$PRIVATE_IDS"
            else
              TYPE="stream"
              # Fall back to defaults when the optional inputs are empty
              STREAM="${IN_STREAM:-light_client}"
              TOPIC="${IN_TOPIC:-chat}"
            fi
            echo "TYPE=$TYPE" | tee -a "$GITHUB_ENV"
            echo "STREAM=$STREAM" | tee -a "$GITHUB_ENV"
            echo "TOPIC=$TOPIC" | tee -a "$GITHUB_ENV"
          fi
      - name: Run benchmarks
        id: run-benchmarks
        env:
          # Passed via env (not interpolated into the script) to avoid shell
          # injection through the dispatch input
          BENCH_NAME: ${{ inputs.bench-name }}
        run: |
          make bench-ci BENCH="$BENCH_NAME" 2>&1 | tee out.txt
          grep 'cycles=' out.txt >> cycles.txt
          grep 'proving_time' out.txt >> timings.txt
          # Join each cycle-count line with its timing JSON line into summary.json
          while IFS=$'\t' read -r f1 f2
          do
            num_cycles=$(echo "$f1" | grep -o 'cycles=[0-9]*' | awk -F'=' '{ print $2 }')
            echo "$f2" | jq -c ". += {\"cycles\": $num_cycles}" >> summary.json
          done < <(paste cycles.txt timings.txt)
          # Render a Markdown report and expose it as the multiline `report` output
          echo "# Benchmark Results " | tee -a summary.md
          echo "## $BENCH_NAME Prove" | tee -a summary.md
          cat summary.json | jtbl -m | tee -a summary.md
          echo "" | tee -a summary.md
          echo "Time unit = milliseconds" | tee -a summary.md
          echo "Workflow URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" | tee -a summary.md
          echo "report<<EOF" >> "$GITHUB_OUTPUT"
          cat summary.md >> "$GITHUB_OUTPUT"
          echo "EOF" >> "$GITHUB_OUTPUT"
        working-directory: ${{ github.workspace }}/aptos/light-client
      - name: Write bench on commit comment
        uses: peter-evans/commit-comment@v3
        with:
          body-path: ${{ github.workspace }}/aptos/light-client/summary.md
      - name: Send report to Zulip
        if: inputs.zulip
        uses: zulip/github-actions-zulip/send-message@v1
        with:
          api-key: ${{ secrets.ZULIP_API_KEY }}
          email: "[email protected]"
          organization-url: "https://zulip.lurk-lab.com"
          to: "${{ env.STREAM }}"
          type: "${{ env.TYPE }}"
          # Ignored if `type: private`
          topic: "${{ env.TOPIC }}"
          content: "${{ steps.run-benchmarks.outputs.report }}"