Merge pull request #1016 from meeseeksmachine/auto-backport-of-pr-101… #504
Workflow file for this run

name: UI Tests
on: [push, pull_request]
jobs:
  ui-tests:
    name: Visual Regression
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python: [3.8]
      fail-fast: false
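    # The job builds the extension against JupyterLab 3, serves the test
    # notebooks with Voila, and runs the Playwright UI/benchmark tests against it.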
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python }}
      - name: Set up Node
        uses: actions/setup-node@v1
        with:
          node-version: '14.x'
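      # Reuse pip's download cache across runs, keyed on setup.py; the
      # restore-keys fallback permits a partial cache match when setup.py changes.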
      - name: Cache pip on Linux
        uses: actions/cache@v2
        if: startsWith(runner.os, 'Linux')
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ matrix.python }}-${{ hashFiles('setup.py') }}
          restore-keys: |
            ${{ runner.os }}-pip-${{ matrix.python }}
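      # yarn keeps its cache in a platform-dependent directory, so ask yarn for
      # the path and expose it as a step output for the cache action below.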
      - name: Get yarn cache directory path
        id: yarn-cache-dir-path
        run: echo "::set-output name=dir::$(yarn cache dir)"
      - name: Cache yarn
        uses: actions/cache@v2
        id: yarn-cache # use this to check for `cache-hit` (`steps.yarn-cache.outputs.cache-hit != 'true'`)
        with:
          path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
          key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
          restore-keys: |
            ${{ runner.os }}-yarn-
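      # Install JupyterLab 3 and the widget/plotting libraries exercised by the
      # test notebooks, build and link the extension in development mode, then
      # install the locked JavaScript dependencies of the ui-tests project.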
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip jupyterlab~=3.0 numpy bqplot matplotlib ipympl ipyvolume scipy
          python -m pip install ".[test]"
          jlpm
          jlpm build
          jupyter labextension develop . --overwrite
          cd ui-tests
          jlpm install --frozen-lockfile
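      # `jlpm start` is expected to serve the test notebooks with Voila on its
      # default port, 8866 (see the wait step below).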
      - name: Launch Voila
        run: |
          cd ui-tests
          # Start the server in the background, capturing stdout and stderr to a log file
          jlpm start > /tmp/jupyterlab_server.log 2>&1 &
      - name: Install browser
        run: |
          cd ui-tests
          # Install only the Chromium browser
          jlpm playwright install chromium
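      # Block until the server answers on port 8866 before running the tests;
      # the timeout is in milliseconds (6 minutes).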
      - name: Wait for JupyterLab
        uses: ifaxity/wait-on-action@v1
        with:
          resource: http-get://localhost:8866/
          timeout: 360000
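      # setup-cml provides the cml-publish command used in the Test step below.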
      - uses: iterative/setup-cml@v1
      - name: Test
        env:
          REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          REPORT: ./benchmark-results/voila-benchmark.md
        shell: bash
        run: |
          cd ui-tests
          jlpm run test
          # Publish the benchmark image via cml.dev and append it to the Markdown report
          echo "" >> ${REPORT}
          cml-publish ./benchmark-results/voila-benchmark.png --md >> ${REPORT}
          echo "" >> ${REPORT}
          # Check whether the benchmark metadata differs from the expected baseline
          export METADATA_DIFF="/tmp/metadata.diff"
          diff -u <(jq --sort-keys .metadata benchmark-results/voila-benchmark.json) <(jq --sort-keys .metadata voila-benchmark-expected.json) > ${METADATA_DIFF} || true
          if [[ -s ${METADATA_DIFF} ]]; then
            echo "<details><summary>:exclamation: Test metadata have changed</summary>" >> ${REPORT}
            echo "" >> ${REPORT}
            echo "\`\`\`diff" >> ${REPORT}
            cat ${METADATA_DIFF} >> ${REPORT}
            echo "\`\`\`" >> ${REPORT}
            echo "" >> ${REPORT}
            echo "</details>" >> ${REPORT}
          fi
          # Save the PR number so the report can later be posted as a PR comment
          echo "${{ github.event.number }}" > ./benchmark-results/NR
      - name: Upload Playwright Test assets
        if: always()
        uses: actions/upload-artifact@v2
        with:
          name: voila-test-assets
          path: |
            ui-tests/test-results
      - name: Upload Playwright Benchmark report
        if: always()
        uses: actions/upload-artifact@v2
        with:
          name: voila-benchmark-report
          path: |
            ui-tests/benchmark-results
      - name: Upload Playwright Test report
        if: always()
        uses: actions/upload-artifact@v2
        with:
          name: voila-test-report
          path: |
            ui-tests/playwright-report
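      # Surface the captured server log in the run output to help debug failures.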
      - name: Print JupyterLab logs
        if: always()
        run: |
          cat /tmp/jupyterlab_server.log