# Generated file: !!! DO NOT EDIT !!!
---
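# The env hashes below pin the benchmark-suite commits that the checkout steps further down use as refs.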
env:
PYPERFORMANCE_HASH: ebb37f3583e26ea22cee34126b3b8a815112370b
PYSTON_BENCHMARKS_HASH: 004743ccbd9e54598c543d7eb71fd3b8e10d5750
name: _pystats
on:
workflow_dispatch:
inputs:
fork:
description: Fork of cpython to benchmark
type: string
default: python
ref:
description: Branch, tag or (full) SHA commit to benchmark
type: string
default: main
benchmarks:
description: Benchmarks to run (comma-separated; empty runs all benchmarks)
type: string
force:
description: Rerun and replace results if commit already exists
type: boolean
individual:
description: Collect pystats for each individual benchmark
type: boolean
tier2:
description: tier 2 interpreter
type: boolean
default: false
jit:
description: JIT
type: boolean
default: false
nogil:
description: free threading
type: boolean
default: false
workflow_call:
inputs:
fork:
description: Fork of cpython to benchmark
type: string
ref:
description: Branch, tag or (full) SHA commit to benchmark
type: string
benchmarks:
description: Benchmarks to run (comma-separated; empty runs all benchmarks)
type: string
dry_run:
description: 'Dry run: Do not commit to the repo'
type: boolean
force:
description: Rerun and replace results if commit already exists
type: boolean
individual:
description: Collect pystats for each individual benchmark
type: boolean
tier2:
description: tier 2 interpreter
type: boolean
default: false
jit:
description: JIT
type: boolean
default: false
nogil:
description: free threading
type: boolean
default: false
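# A single self-hosted Linux job: build an instrumented CPython, run the benchmarks, and commit the collected stats back to the repository.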
jobs:
collect-stats:
runs-on: [self-hosted, linux]
steps:
- name: Checkout benchmarking
uses: actions/checkout@v4
- name: git gc
run: |
git gc
- name: Setup system Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Checkout CPython
uses: actions/checkout@v4
with:
repository: ${{ inputs.fork }}/cpython
ref: ${{ inputs.ref }}
path: cpython
fetch-depth: 50
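# The virtual environment created below (from the system Python 3.11 set up above) hosts bench_runner and, later, the pyperformance installation.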
- name: Install dependencies from PyPI
run: |
rm -rf venv
python -m venv venv
venv/bin/python -m pip install -r requirements.txt
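# bench_runner's should_run decides whether results for this fork/ref/flag combination still need to be collected; every later step is gated on its 'should_run' output, and the 'force' input requests a rerun even when results already exist.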
- name: Should we run?
if: ${{ always() }}
id: should_run
run: |
venv/bin/python -m bench_runner should_run ${{ inputs.force }} ${{ inputs.fork }} ${{ inputs.ref }} all true ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} >> $GITHUB_OUTPUT
- name: Checkout python-macrobenchmarks
uses: actions/checkout@v4
if: ${{ steps.should_run.outputs.should_run != 'false' }}
with:
repository: pyston/python-macrobenchmarks
path: pyston-benchmarks
ref: ${{ env.PYSTON_BENCHMARKS_HASH }}
- name: Checkout pyperformance
uses: actions/checkout@v4
if: ${{ steps.should_run.outputs.should_run != 'false' }}
with:
repository: mdboom/pyperformance
path: pyperformance
ref: ${{ env.PYPERFORMANCE_HASH }}
- name: Create pystats directory
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
# If we don't do this, stats are printed to the console
rm -rf /tmp/py_stats
mkdir /tmp/py_stats
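# --enable-pystats builds an instrumented interpreter that writes its statistics into /tmp/py_stats (created above); the tier2, jit and nogil inputs map onto the corresponding configure flags.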
- name: Build Python
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
cd cpython
./configure --enable-pystats --prefix=$PWD/install ${{ inputs.tier2 == true && '--enable-experimental-jit=interpreter' || '' }} ${{ inputs.jit == true && '--enable-experimental-jit=yes' || '' }} ${{ inputs.nogil == true && '--disable-gil' || '' }}
make -j4
make install
- name: Install pyperformance into the system python
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
venv/bin/python -m pip install --no-binary :all: ./pyperformance
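# bench_runner drives the actual benchmark run against the freshly built cpython/python binary, recording the run id and any tier2/jit/nogil flags alongside the results.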
- name: Running pyperformance
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
venv/bin/python -m bench_runner run_benchmarks pystats cpython/python ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.benchmarks || 'all' }} ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} --run_id ${{ github.run_id }} ${{ inputs.individual == true && '--individual' || '' }}
- name: Pull benchmarking
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
# Another benchmarking task may have created results for the same
# commit while the above was running. This "magic" incantation means
# that any local results for this commit will override anything we
# just pulled in in that case.
git pull -s recursive -X ours --autostash --rebase
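# Commit the newly collected results directory back to the benchmarking repository.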
- name: Add data to repo
if: ${{ steps.should_run.outputs.should_run != 'false' }}
uses: EndBug/add-and-commit@v9
with:
add: results
env:
flags: ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true &&
'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }}